text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# ----------------------------------------------------------------------
# Copyright (c) 2016, The Regents of the University of California All
# rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of The Regents of the University of California
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
# UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# ----------------------------------------------------------------------
# Filename: IP.py
# Version: 0.1
# Description: Defines the IP object, which is anything that has resources
# in an OpenCL Board Support Package
# Author: Dustin Richmond
# Import Python Utilities
import xml.etree.ElementTree as ET
import abc, sys
import Tinker
class IP(dict):
    """A generic IP block: a dictionary of the parameters that describe
    one resource-consuming component in an OpenCL Board Support Package.
    Concrete subclasses (e.g. Memory) implement parsing/verification.
    """
    # FPGA resource categories an IP block can consume.
    _C_RESOURCE_TYPES = set(["alms", "ffs", "rams", "dsps"])

    def __init__(self, e):
        """
        Construct a generic IP object that encapsulates a dictionary

        Arguments:

        e -- An element tree element containing the description of this
        object
        """
        d = self.parse(e)
        self.validate(d)
        self.update(d)
        # Set once this IP has been claimed by a configuration pass.
        self.__is_claimed = False

    def __claim(self):
        # Claim this IP for a configuration; a second claim is fatal.
        if(self.__is_claimed):
            # BUG FIX: the original format string had no %s placeholder
            # ("Error: IP has already been" % self["type"]), so hitting
            # this path raised TypeError instead of printing the error.
            sys.exit("Error: IP %s has already been claimed" % self["type"])
        self.__is_claimed = True

    def configure(self, d):
        """
        Configure this object according to a high level description,
        fill in any missing defaults, and verify that the description
        can be implemented

        Arguments:

        d -- A Description object, containing the parsed user description
        of a custom board
        """
        self.__claim()
        self.validate(self)

    @abc.abstractmethod
    def parse(cls, e):
        """
        Parse the description of this IP object from an element tree
        element and return a dictionary with the parameters found.

        Arguments:

        e -- An element tree element containing the description of this
        object
        """
        # NOTE(review): @abc.abstractmethod has no effect unless the class
        # uses ABCMeta; kept as-is so existing subclasses stay instantiable.

    @classmethod
    def validate(cls, d):
        """
        Validate the parameters that describe the intrinsic settings of
        this IP

        Arguments:

        d -- A Description object, containing the parsed user description
        of a custom board
        """
        pass

    @abc.abstractmethod
    def verify(cls, d):
        """
        Check a user-description to ensure that this IP object can
        implement the desired settings.

        Arguments:

        d -- A Description object, containing the parsed user description
        of a custom board
        """
        pass

    @abc.abstractmethod
    def get_macros(self, s):
        """Return the list of Verilog macros this IP defines (none by
        default)."""
        return []
def construct(e):
    # Factory: dispatch on the XML tag of e and build the matching IP
    # subclass. Only the "memory" tag is currently supported; anything
    # else is a fatal error.
    t = e.tag
    if(t == "memory"):
        # Imported here rather than at module top -- presumably to avoid
        # a circular import (Memory likely imports IP); confirm.
        import Memory
        return Memory.Memory(e)
    else:
        # Python 2 print statements.
        print "In XML Element:"
        print ET.tostring(e)
        sys.exit("Unknown IP Type %s" % t)
def parse_string(e, k):
    """Fetch attribute k from element e as a string.

    Reports a fatal error through Tinker when the attribute is missing
    or is not a valid string.
    """
    value = e.get(k)
    if value is None:
        Tinker.key_error(k, ET.tostring(e))
    elif not Tinker.is_string(value):
        Tinker.value_error_xml(k, value, "Strings", ET.tostring(e))
    return value
def parse_float(e, key):
    """Parse attribute `key` of element `e` as a float.

    Reports a fatal error through Tinker when the value is not a real
    number.
    """
    s = parse_string(e, key)
    try:
        return float(s)
    except ValueError:
        # BUG FIX: the original passed the undefined name 'ks' here,
        # raising NameError instead of the intended Tinker error.
        Tinker.value_error_xml(key, s, "Real Numbers", ET.tostring(e))
def parse_int(e, key):
    """Parse attribute `key` of element `e` as an int.

    Reports a fatal error through Tinker when the value is not an
    integer.
    """
    s = parse_string(e, key)
    try:
        return int(s)
    except ValueError:
        # BUG FIX: the original passed the undefined name 'ks' here,
        # raising NameError instead of the intended Tinker error.
        Tinker.value_error_xml(key, s, "Integers", ET.tostring(e))
def parse_list_from_string(s):
    """Split a comma-separated string into a list of stripped tokens."""
    return [token.strip() for token in s.split(",")]
def parse_list(e, key):
    """Parse attribute `key` of element `e` as a comma-separated list.

    Returns the list of stripped tokens.
    """
    # Consistency: delegate to parse_list_from_string instead of
    # duplicating the split-and-strip logic inline (the original also
    # shadowed the parameter `e` inside its comprehension).
    return parse_list_from_string(parse_string(e, key))
def parse_id(e):
    """Parse and validate the "id" attribute of element `e`.

    The id must be purely alphanumeric; otherwise a fatal Tinker error
    is reported.
    """
    id = parse_string(e, "id")
    if not Tinker.is_alphachar(id):
        # BUG FIX: the original called the bare name value_error_xml,
        # which is undefined in this module -> NameError; it must be
        # qualified with Tinker like every other error helper here.
        Tinker.value_error_xml("id", id, "Alphanumeric Characters",
                               ET.tostring(e))
    return id
def parse_macros(e):
    """Parse the "macros" attribute of `e` as a list of Verilog macro
    names, erroring out on any invalid name."""
    names = parse_list(e, "macros")
    for name in names:
        if not Tinker.is_valid_verilog_name(name):
            Tinker.value_error_xml("macros", name, "Valid Verilog Names",
                                   ET.tostring(e))
    return names
def find(r, p):
    """Return the first child of element `r` matching path `p`.

    Reports a fatal Tinker error when no match exists.
    """
    e = r.find("./%s" % p)
    if e is None:
        Tinker.path_error_xml(p, ET.tostring(r))
    # BUG FIX: the original returned the undefined name 'es' -> NameError
    # on every successful lookup. Also use 'is None': ElementTree elements
    # define truthiness/equality oddly, so identity is the safe test.
    return e
def findall(r, t):
    """Return every child of element `r` matching path `t`.

    Reports a fatal Tinker error when there are no matches.
    """
    matches = r.findall("./%s" % t)
    if not matches:
        Tinker.path_error_xml(t, ET.tostring(r))
    return matches
def findsingle(r, t):
    # Return the unique child of r matching path t. findall already
    # errors when there are zero matches; more than one match is fatal.
    es = findall(r, t)
    if(len(es) > 1):
        # Python 2 print statements.
        print "In XML Element:"
        print ET.tostring(r)
        sys.exit("Multiple subelements with Tag %s found" % t)
    return es[0]
def findunique(r, t):
    # Return all children matching path t, requiring their tags to be
    # unique. NOTE(review): when t is a literal tag name, every match has
    # the same tag, so this fires whenever len(es) > 1 -- this only makes
    # sense when t is a wildcard path such as "*"; confirm callers.
    es = findall(r, t)
    if(Tinker.contains_duplicates([e.tag for e in es])):
        # Python 2 print statements.
        print "In XML Element:"
        print ET.tostring(r)
        sys.exit("Subelements with matching Tags found. Tags must be unique")
    return es
|
drichmond/tinker
|
python/IP.py
|
Python
|
bsd-3-clause
| 6,330
|
[
"TINKER"
] |
0ff8d5b142fbd565c93860910e0eb2e9f8e136e052863d39b4b7b2b9a781091b
|
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Supplies an abstract class for working with sequences of molecules
"""
class MolSupplier(object):
""" we must, at minimum, support forward iteration
"""
def __init__(self):
raise ValueError,'cannot instantiate MolSuppliers'
def Reset(self):
pass
def __iter__(self):
self.Reset()
return self
def next(self):
res = self.NextMol()
if res is not None:
return res
else:
raise StopIteration
def NextMol(self):
""" Must be implemented in child class
"""
pass
|
rdkit/rdkit-orig
|
rdkit/Chem/Suppliers/MolSupplier.py
|
Python
|
bsd-3-clause
| 830
|
[
"RDKit"
] |
2583c5192ca0d54b8cf3ca62795cd2facd8cb2d8fa77f2656507a0df95f0c8de
|
#!/usr/bin/env python
###############################################################################
# Copyright 2015-2016 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
import csv
import re
import sys
import argparse
import traceback
from nacc.uds3 import blanks
from nacc.uds3.ivp import builder as ivp_builder
from nacc.uds3.np import builder as np_builder
from nacc.uds3.fvp import builder as fvp_builder
from nacc.uds3 import filters
def check_blanks(packet):
    """
    Parses rules for when each field should be blank and then checks them
    """
    # NOTE(review): 'pattern' is assigned but never used below -- the
    # actual rule parsing happens in blanks.convert_rule_to_python;
    # presumably a leftover from an earlier implementation.
    pattern = re.compile(r"Blank if Question \d+ (\w+) (ne|=) (\d+)")
    warnings = []
    for form in packet:
        # Find all fields that:
        # 1) have blanking rules; and
        # 2) aren't blank.
        # itervalues() is Python 2 only.
        for field in [f for f in form.fields.itervalues()
                      if f.blanks and not empty(f)]:
            for rule in field.blanks:
                # convert_rule_to_python yields a predicate over the
                # whole packet; a True result means the field should
                # have been blank.
                r = blanks.convert_rule_to_python(field.name, rule)
                if r(packet):
                    warnings.append(
                        "'%s' is '%s' with length '%s', but should be blank: '%s'. Test form: '%s'" %
                        (field.name, field.value, len(field.value), rule, form.form_name))
    return warnings
def check_single_select(packet):
    """Check sets of interdependent questions that act like radio groups.

    Some question sets should have at most one "Present"/selected value,
    but the REDCap implementation cannot enforce that, so each group is
    double-checked here. Returns a list of warning strings.
    """
    # (field-name group, warning emitted when more than one is selected)
    groups = (
        # D1 4
        (('AMNDEM', 'PCA', 'PPASYN', 'FTDSYN', 'LBDSYN', 'NAMNDEM'),
         'For Form D1, Question 4, there is unexpectedly more '
         'than one syndrome indicated as "Present".'),
        # D1 5
        (('MCIAMEM', 'MCIAPLUS', 'MCINON1', 'MCINON2', 'IMPNOMCI'),
         'For Form D1, Question 5, there is unexpectedly more '
         'than one syndrome indicated as "Present".'),
        # D1 11-39
        (('ALZDISIF', 'LBDIF', 'MSAIF', 'PSPIF', 'CORTIF', 'FTLDMOIF',
          'FTLDNOIF', 'FTLDSUBX', 'CVDIF', 'ESSTREIF', 'DOWNSIF', 'HUNTIF',
          'PRIONIF', 'BRNINJIF', 'HYCEPHIF', 'EPILEPIF', 'NEOPIF', 'HIVIF',
          'OTHCOGIF', 'DEPIF', 'BIPOLDIF', 'SCHIZOIF', 'ANXIETIF',
          'DELIRIF', 'PTSDDXIF', 'OTHPSYIF', 'ALCDEMIF', 'IMPSUBIF',
          'DYSILLIF', 'MEDSIF', 'COGOTHIF', 'COGOTH2F', 'COGOTH3F'),
         'For Form D1, Questions 11-39, there is unexpectedly '
         'more than one Primary cause selected.'),
    )
    warnings = []
    for fields, message in groups:
        if not exclusive(packet, fields):
            warnings.append(message)
    return warnings
def empty(field):
    """Return True when the field's value is empty or whitespace-only."""
    return not field.value.strip()
def exclusive(packet, fields, value_to_check=1):
    """Return True iff at most one of the named fields holds value_to_check.

    Arguments:
    packet -- mapping from field name to a field object with a .value
    fields -- iterable of field names to inspect
    value_to_check -- the "selected" value (default 1)
    """
    # PORTABILITY FIX: the original used len(filter(...)), which works on
    # Python 2 (filter returns a list) but raises TypeError on Python 3
    # (filter returns an iterator). Counting with a generator expression
    # behaves identically on both.
    selected = sum(1 for f in fields if packet[f].value == value_to_check)
    return selected <= 1
def set_blanks_to_zero(packet):
    """ Sets specific fields to zero if they meet certain criteria """
    def set_to_zero_if_blank(*field_names):
        # Overwrite each named field's value with 0 when it is currently
        # blank/whitespace (per empty()).
        for field_name in field_names:
            field = packet[field_name]
            if empty(field):
                field.value = 0
    # NOTE(review): the guards below compare packet['X'] (a field object)
    # directly against 1/0; this assumes the field type implements
    # equality with ints -- confirm against the uds3 Field class.
    # B8 2.
    if packet['PARKSIGN'] == 1:
        set_to_zero_if_blank(
            'RESTTRL', 'RESTTRR', 'SLOWINGL', 'SLOWINGR', 'RIGIDL', 'RIGIDR',
            'BRADY', 'PARKGAIT', 'POSTINST')
    # B8 3.
    if packet['CVDSIGNS'] == 1:
        set_to_zero_if_blank('CORTDEF', 'SIVDFIND', 'CVDMOTL', 'CVDMOTR',
                             'CORTVISL', 'CORTVISR', 'SOMATL', 'SOMATR')
    # B8 5.
    # NOTE(review): 'PSPCBS' appears both in this guard (== 1, i.e. not
    # blank) and in the list of fields to zero-if-blank, so zeroing it
    # here can never trigger -- confirm whether that is intentional.
    if packet['PSPCBS'] == 1:
        set_to_zero_if_blank(
            'PSPCBS', 'EYEPSP', 'DYSPSP', 'AXIALPSP', 'GAITPSP', 'APRAXSP',
            'APRAXL', 'APRAXR', 'CORTSENL', 'CORTSENR', 'ATAXL', 'ATAXR',
            'ALIENLML', 'ALIENLMR', 'DYSTONL', 'DYSTONR')
    # D1 4.
    if packet['DEMENTED'] == 1:
        set_to_zero_if_blank(
            'AMNDEM', 'PCA', 'PPASYN', 'FTDSYN', 'LBDSYN', 'NAMNDEM')
    # D1 5.
    if packet['DEMENTED'] == 0:
        set_to_zero_if_blank(
            'MCIAMEM', 'MCIAPLUS', 'MCINON1', 'MCINON2', 'IMPNOMCI')
    # D1 11-39. (Unconditional: these are always defaulted when blank.)
    set_to_zero_if_blank(
        'ALZDIS', 'LBDIS', 'MSA', 'PSP', 'CORT', 'FTLDMO', 'FTLDNOS', 'CVD',
        'ESSTREM', 'DOWNS', 'HUNT', 'PRION', 'BRNINJ', 'HYCEPH', 'EPILEP',
        'NEOP', 'HIV', 'OTHCOG', 'DEP', 'BIPOLDX', 'SCHIZOP', 'ANXIET',
        'DELIR', 'PTSDDX', 'OTHPSY', 'ALCDEM', 'IMPSUB', 'DYSILL', 'MEDS',
        'COGOTH', 'COGOTH2', 'COGOTH3')
    # D2 11.
    if packet['ARTH'] == 1:
        set_to_zero_if_blank('ARTUPEX', 'ARTLOEX', 'ARTSPIN', 'ARTUNKN')
def main():
    """
    Reads a REDCap exported CSV, data file, then prints it out in NACC's format
    """
    # Build a CLI parser when run as a script. NOTE(review): 'raw_csv' is
    # not defined anywhere in this file, so importing this module and
    # calling main() would raise NameError here -- confirm that script
    # execution is the only supported entry point.
    if __name__ == '__main__':
        parser = argparse.ArgumentParser(description='Process redcap form output to nacculator.')
    else:
        parser = raw_csv
    # Maps the CLI filter name to the suffix of the filters.filter_*
    # function that implements it.
    filters_names = { 'cleanPtid' : 'clean_ptid',
                      'replaceDrugId' : 'replace_drug_id',
                      'fixC1S' : 'fix_c1s',
                      'fillDefault' : 'fill_default',
                      'updateField' : 'update_field'}
    # -fvp / -ivp / -np / -f are mutually exclusive processing modes.
    option_group = parser.add_mutually_exclusive_group()
    option_group.add_argument('-fvp', action='store_true', dest='fvp', help='Set this flag to process as fvp data')
    option_group.add_argument('-ivp', action='store_true', dest='ivp', help='Set this flag to process as ivp data')
    option_group.add_argument('-np', action='store_true', dest='np', help='Set this flag to process as np data')
    option_group.add_argument('-f', '--filter', action='store', dest='filter', choices=filters_names.keys(), help='Set this flag to process the filter')
    parser.add_argument('-file', action='store', dest='file', help='Path of the csv file to be processed.')
    parser.add_argument('-meta', action='store', dest='filter_meta', help='Input file for the filter metadata (in case -filter is used)')
    options = parser.parse_args()
    #options = None
    # Defaults to processing of ivp.
    # TODO this can be changed in future to process fvp by default.
    #if(options == None):
    # print "Hello Flask."
    if not (options.ivp or options.fvp or options.np or options.filter):
        options.ivp = True
    # Read from stdin when no -file argument was given.
    fp = sys.stdin if options.file == None else open(options.file, 'r')
    # Place holder for future. May need to output to a specific file in future.
    output = sys.stdout
    if options.filter:
        # Filter mode: look up and run the matching filters.filter_* function.
        filter_method = getattr(filters, 'filter_' + filters_names[options.filter])
        filter_method(fp, options.filter_meta, output)
    else:
        # Conversion mode: build one UDS3 packet per CSV record and print
        # each of its forms in NACC fixed-width format.
        reader = csv.DictReader(fp)
        for record in reader:
            # Progress/diagnostics go to stderr (Python 2 print >> syntax)
            # so stdout stays clean for the converted output.
            print >> sys.stderr, "[START] ptid : " + str(record['ptid'])
            print >> sys.stderr, "[Date(M-D-Y)][Visit #]: ["+ str(record['visitmo']) + "-" + str(record['visitday']) + "-" + str(record['visityr']) + "][" + str(record['visitnum']) + "]"
            try:
                if options.ivp:
                    packet = ivp_builder.build_uds3_ivp_form(record)
                elif options.np:
                    packet = np_builder.build_uds3_np_form(record)
                elif options.fvp:
                    packet = fvp_builder.build_uds3_fvp_form(record)
            except Exception, exp:
                # A bad record is logged and skipped, not fatal.
                if 'ptid' in record:
                    print >> sys.stderr, "[SKIP] Error for ptid : " + str(record['ptid'])
                traceback.print_exc()
                continue
            # NP packets do not get the zero-defaulting/single-select checks.
            if not options.np:
                set_blanks_to_zero(packet)
            warnings = []
            warnings += check_blanks(packet)
            if not options.np:
                warnings += check_single_select(packet)
            if warnings:
                print >> sys.stderr, "\n".join(warnings)
            for form in packet:
                print form
if __name__ == '__main__':
    main()
|
ZacZZZ/Nacculator_Github
|
nacc/backup/redcap2nacc-7-3-17.py
|
Python
|
bsd-2-clause
| 8,578
|
[
"VisIt"
] |
fee4b1514c5217fe86c5a33b34ca6274044d1d53a6c83a262875b96528a2fdf5
|
"""feedfinder: Find the Web feed for a Web page
http://www.aaronsw.com/2002/feedfinder/
Usage:
feed(uri) - returns feed found for a URI
feeds(uri) - returns all feeds found for a URI
>>> import feedfinder
>>> feedfinder.feed('scripting.com')
'http://scripting.com/rss.xml'
>>>
>>> feedfinder.feeds('scripting.com')
['http://delong.typepad.com/sdj/atom.xml',
'http://delong.typepad.com/sdj/index.rdf',
'http://delong.typepad.com/sdj/rss.xml']
>>>
Can also use from the command line. Feeds are returned one per line:
$ python feedfinder.py diveintomark.org
http://diveintomark.org/xml/atom.xml
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
6. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
8. As a last ditch effort, we search Syndic8 for feeds matching the URI
"""
__version__ = "1.371"
__date__ = "2006-04-24"
__maintainer__ = "Aaron Swartz (me@aaronsw.com)"
__author__ = "Mark Pilgrim (http://diveintomark.org)"
__copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz"
__license__ = "Python"
__credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches"""
_debug = 0
import sgmllib, urllib, urlparse, re, sys, robotparser
import requests
from StringIO import StringIO
from lxml import etree
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.3 now comes with this module by default, otherwise you can download it
try:
import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
xmlrpclib = None
if not dict:
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
def _debuglog(message):
    # Emit trace output only when the module-level _debug flag is set.
    if _debug: print message
class URLGatekeeper:
    """a class to track robots.txt rules across multiple servers"""
    def __init__(self):
        self.rpcache = {} # a dictionary of RobotFileParser objects, by domain
        # Python 2 urllib opener; only used here as a holder for the
        # User-Agent string and headers -- actual fetching goes through
        # requests in get().
        self.urlopener = urllib.FancyURLopener()
        self.urlopener.version = "PyTune Feed Finder (Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 Safari/534.48.3)"
        _debuglog(self.urlopener.version)
        self.urlopener.addheaders = [('User-Agent', self.urlopener.version)]
        # self.urlopener.addheaders = [('User-Agent', self.urlopener.version), ('Accept', '*')]
        # Make robotparser's own robots.txt fetches use the same UA.
        robotparser.URLopener.version = self.urlopener.version
        robotparser.URLopener.addheaders = self.urlopener.addheaders
    def _getrp(self, url):
        # Return the (per-domain cached) RobotFileParser for url's host.
        protocol, domain = urlparse.urlparse(url)[:2]
        if self.rpcache.has_key(domain):  # Python 2 dict API
            return self.rpcache[domain]
        baseurl = '%s://%s' % (protocol, domain)
        robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
        _debuglog('fetching %s' % robotsurl)
        rp = robotparser.RobotFileParser(robotsurl)
        try:
            rp.read()
        except:
            # Best effort: an unreadable robots.txt is treated as permissive.
            pass
        self.rpcache[domain] = rp
        return rp
    def can_fetch(self, url):
        # True when robots.txt permits our User-Agent to fetch url.
        rp = self._getrp(url)
        allow = rp.can_fetch(self.urlopener.version, url)
        _debuglog("gatekeeper of %s says %s" % (url, allow))
        return allow
    def get(self, url, check=False):
        # Download url's body; optionally honor robots.txt first.
        # Any failure (network error, robots denial) yields ''.
        if check and not self.can_fetch(url): return ''
        try:
            return requests.get(url, headers=dict(self.urlopener.addheaders)).content
        except:
            return ''
_gatekeeper = URLGatekeeper()
class BaseParser(sgmllib.SGMLParser):
    """Shared SGML parser that accumulates discovered links and tracks
    the document base URI (updated by <base href=...>)."""
    def __init__(self, baseuri):
        sgmllib.SGMLParser.__init__(self)
        self.links = []       # URLs collected by subclasses
        self.baseuri = baseuri
    def normalize_attrs(self, attrs):
        """Lowercase attribute names (and rel/type values), decode
        character/entity references, and drop attributes that clean to
        empty."""
        def cleanattr(v):
            # Decode numeric character references (&#NN;) -- py2 unichr.
            v = sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v)
            if not v: return
            v = v.strip()
            # BUG FIX: this line had been corrupted into no-op
            # self-replacements (e.g. replace('<', '<')) by an HTML
            # entity-unescaping pass over the source; restored the
            # canonical feedfinder entity decoding.
            v = v.replace('&lt;', '<').replace('&gt;', '>').replace('&apos;', "'").replace('&quot;', '"').replace('&amp;', '&')
            return v
        attrs = [(k.lower(), cleanattr(v)) for k, v in attrs if cleanattr(v)]
        attrs = [(k, k in ('rel','type') and v.lower() or v) for k, v in attrs if cleanattr(v)]
        return attrs
    def do_base(self, attrs):
        # <base href="..."> resets the URI used to resolve relative links.
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.baseuri = attrsD['href']
    def error(self, *a, **kw): pass # we're not picky
class LinkParser(BaseParser):
    # Collects feed URLs from <link rel="alternate" type="..."> header
    # tags (classic feed autodiscovery).
    FEED_TYPES = ('application/rss+xml',
                  'text/xml',
                  'application/atom+xml',
                  'application/x.atom+xml',
                  'application/x-atom+xml')
    def do_link(self, attrs):
        # Keep only "alternate" links whose MIME type marks a feed;
        # resolve relative hrefs against the tracked base URI.
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('rel'): return  # Python 2 dict API
        rels = attrsD['rel'].split()
        if 'alternate' not in rels: return
        if attrsD.get('type') not in self.FEED_TYPES: return
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
class ALinkParser(BaseParser):
    # Collects the href of every <a> tag, resolved against the base URI.
    def start_a(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return  # Python 2 dict API
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
def makeFullURI(uri):
    """Normalize a user-supplied URI to an absolute http(s) URL.

    'feed://' is rewritten to 'http://'; bare host names get an
    'http://' prefix. Returns None for an empty/falsy input.
    """
    if not uri:
        return
    cleaned = uri.strip()
    if cleaned.startswith('feed://'):
        cleaned = 'http://' + cleaned.split('feed://', 1).pop()
    if cleaned.startswith('http://') or cleaned.startswith('https://'):
        return cleaned
    return 'http://%s' % cleaned
def getLinks(data, baseuri):
    """Feed URLs advertised via <link rel="alternate"> tags in data."""
    parser = LinkParser(baseuri)
    parser.feed(data)
    return parser.links
def getLinksLXML(data, baseuri):
    """Fallback for getLinks using lxml's recovering HTML parser.

    Returns the raw (unresolved) href of every <link> whose type is a
    known feed MIME type. baseuri is accepted for symmetry but unused.
    """
    html_parser = etree.HTMLParser(recover=True)
    tree = etree.parse(StringIO(data), html_parser)
    hits = []
    for node in tree.findall('.//link'):
        if node.attrib.get('type') not in LinkParser.FEED_TYPES:
            continue
        href = node.attrib['href']
        if href:
            hits.append(href)
    return hits
def getALinks(data, baseuri):
    """Hrefs of all <a> tags in data, resolved against baseuri."""
    parser = ALinkParser(baseuri)
    parser.feed(data)
    return parser.links
def getLocalLinks(links, baseuri):
    """Subset of links hosted under baseuri (case-insensitive prefix
    match). Non-string entries are silently skipped; a falsy baseuri
    yields an empty list."""
    if not baseuri:
        return []
    prefix = baseuri.lower()
    local = []
    for link in links:
        try:
            if link.lower().startswith(prefix):
                local.append(link)
        except (AttributeError, UnicodeDecodeError):
            pass
    return local
def isFeedLink(link):
    """True when link's extension marks it as an obvious feed URL.

    BUG FIX: the original tested only the last 4 characters
    (link[-4:]), so the 5-character '.atom' suffix could never match.
    endswith with a tuple of suffixes handles all lengths correctly.
    """
    return link.lower().endswith(('.rss', '.rdf', '.xml', '.atom'))
def isXMLRelatedLink(link):
    """Count of feed-ish substrings ('rss', 'rdf', 'xml', 'atom') in
    link, case-insensitive; nonzero means the link looks XML-related."""
    lowered = link.lower()
    return sum(lowered.count(word) for word in ('rss', 'rdf', 'xml', 'atom'))
# Matches UserLand-style "broken redirect" bodies: the replacement URI is
# wrapped in a <newLocation> element.
r_brokenRedirect = re.compile('<newLocation[^>]*>(.*?)</newLocation>', re.S)
def tryBrokenRedirect(data):
    """Return the stripped target of a <newLocation> redirect in data,
    or None when data contains no such redirect."""
    if '<newLocation' not in data:
        return None
    targets = r_brokenRedirect.findall(data)
    if targets and targets[0]:
        return targets[0].strip()
def couldBeFeedData(data):
    """Cheap heuristic: count of feed root tags in data, or 0 when the
    document looks like HTML."""
    lowered = data.lower()
    if '<html' in lowered:
        return 0
    return sum(lowered.count(tag) for tag in ('<rss', '<rdf', '<feed'))
def isFeed(uri):
    """Fetch uri and return a truthy feed-marker count when it serves
    feed data; 0/False for non-http(s) schemes or fetch failures."""
    _debuglog('seeing if %s is a feed' % uri)
    scheme = urlparse.urlparse(uri)[0]
    if scheme not in ('http', 'https'):
        return 0
    try:
        data = _gatekeeper.get(uri, check=False)
    except (KeyError, UnicodeDecodeError):
        return False
    return couldBeFeedData(data)
def sortFeeds(feed1Info, feed2Info):
    """Python-2-style comparator ordering feed info dicts by descending
    'headlines_rank'.

    PORTABILITY FIX: the original used cmp(), which does not exist on
    Python 3; the (a > b) - (a < b) idiom returns the identical
    -1/0/1 result on both versions.
    """
    a = feed2Info['headlines_rank']
    b = feed1Info['headlines_rank']
    return (a > b) - (a < b)
def getFeedsFromSyndic8(uri):
    # Query the Syndic8 XML-RPC service for feeds matching uri and return
    # the syndicated feed URLs, most popular first. Best-effort: any
    # failure (xmlrpclib missing, network error) yields an empty list.
    feeds = []
    try:
        server = xmlrpclib.Server('http://www.syndic8.com/xmlrpc.php')
        feedids = server.syndic8.FindFeeds(uri)
        infolist = server.syndic8.GetFeedInfo(feedids, ['headlines_rank','status','dataurl'])
        # Python 2 comparator-style sort: descending headlines_rank.
        infolist.sort(sortFeeds)
        feeds = [f['dataurl'] for f in infolist if f['status']=='Syndicated']
        _debuglog('found %s feeds through Syndic8' % len(feeds))
    except:
        pass
    return feeds
def feeds(uri, all=False, querySyndic8=False, _recurs=None):
    # Feed-discovery pipeline: fetch the page, then apply progressively
    # looser heuristics until feeds are found (module docstring steps
    # 1-8). When all=True every heuristic runs and results accumulate.
    # _recurs tracks URIs already visited so broken-redirect chains
    # cannot loop forever.
    if _recurs is None: _recurs = [uri]
    fulluri = makeFullURI(uri)
    try:
        data = _gatekeeper.get(fulluri, check=False)
    except:
        return []
    # is this already a feed?
    if couldBeFeedData(data):
        return [fulluri]
    newuri = tryBrokenRedirect(data)
    if newuri and newuri not in _recurs:
        _recurs.append(newuri)
        return feeds(newuri, all=all, querySyndic8=querySyndic8, _recurs=_recurs)
    # nope, it's a page, try LINK tags first
    _debuglog('looking for LINK tags')
    try:
        outfeeds = getLinks(data, fulluri)
    except:
        outfeeds = []
    if not outfeeds:
        _debuglog('using lxml to look for LINK tags')
        try:
            outfeeds = getLinksLXML(data, fulluri)
        except:
            outfeeds = []
    _debuglog('found %s feeds through LINK tags' % len(outfeeds))
    # Python 2: filter() returns a list here (and below), so extend()
    # and indexing work.
    outfeeds = filter(isFeed, outfeeds)
    if all or not outfeeds:
        # no LINK tags, look for regular <A> links that point to feeds
        _debuglog('no LINK tags, looking at A tags')
        try:
            links = getALinks(data, fulluri)
        except:
            links = []
        _debuglog('no LINK tags, looking at local links')
        locallinks = getLocalLinks(links, fulluri)
        # look for obvious feed links on the same server
        outfeeds.extend(filter(isFeed, filter(isFeedLink, locallinks)))
        if all or not outfeeds:
            # look harder for feed links on the same server
            outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, locallinks)))
        if all or not outfeeds:
            # look for obvious feed links on another server
            outfeeds.extend(filter(isFeed, filter(isFeedLink, links)))
        if all or not outfeeds:
            # look harder for feed links on another server
            outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, links)))
    if all or not outfeeds:
        _debuglog('no A tags, guessing')
        suffixes = [ # filenames used by popular software:
            'feed/', # obvious
            'atom.xml', # blogger, TypePad
            'index.atom', # MT, apparently
            'index.rdf', # MT
            'rss.xml', # Dave Winer/Manila
            'index.xml', # MT
            'index.rss' # Slash
        ]
        outfeeds.extend(filter(isFeed, [urlparse.urljoin(fulluri, x) for x in suffixes]))
    if (all or not outfeeds) and querySyndic8:
        # still no luck, search Syndic8 for feeds (requires xmlrpclib)
        _debuglog('still no luck, searching Syndic8')
        outfeeds.extend(getFeedsFromSyndic8(uri))
    # Deduplicate when a set type is available (guard dates from
    # Python 2.3, where set() was new; has_key is Python 2 only).
    if hasattr(__builtins__, 'set') or __builtins__.has_key('set'):
        outfeeds = list(set(outfeeds))
    return outfeeds
getFeeds = feeds # backwards-compatibility
def feed(uri):
    #todo: give preference to certain feed formats
    # Return the single "best" feed for uri: prefer one whose URL does
    # not mention 'comments', otherwise the first found; None when no
    # feeds exist.
    feedlist = feeds(uri)
    if feedlist:
        # Python 2: filter() returns an indexable list here.
        feeds_no_comments = filter(lambda f: 'comments' not in f.lower(), feedlist)
        if feeds_no_comments:
            return feeds_no_comments[0]
        return feedlist[0]
    else:
        return None
##### test harness ######
def test():
    # Walk Mark Pilgrim's chained autodiscovery test pages: each page must
    # yield exactly one feed link, and the feed must point back at the
    # page. Follows <link rel="next"> until the chain ends, printing one
    # dot per page and a summary at the end. (Python 2 print statements.)
    uri = 'http://diveintomark.org/tests/client/autodiscovery/html4-001.html'
    failed = []
    count = 0
    while 1:
        data = _gatekeeper.get(uri)
        # Stop when the fetched page is not part of the test suite.
        if data.find('Atom autodiscovery test') == -1: break
        sys.stdout.write('.')
        sys.stdout.flush()
        count += 1
        links = getLinks(data, uri)
        if not links:
            print '\n*** FAILED ***', uri, 'could not find link'
            failed.append(uri)
        elif len(links) > 1:
            print '\n*** FAILED ***', uri, 'found too many links'
            failed.append(uri)
        else:
            atomdata = urllib.urlopen(links[0]).read()
            if atomdata.find('<link rel="alternate"') == -1:
                print '\n*** FAILED ***', uri, 'retrieved something that is not a feed'
                failed.append(uri)
            else:
                # The feed's last href must point back at the test page.
                backlink = atomdata.split('href="').pop().split('"')[0]
                if backlink != uri:
                    print '\n*** FAILED ***', uri, 'retrieved wrong feed'
                    failed.append(uri)
        if data.find('<link rel="next" href="') == -1: break
        uri = urlparse.urljoin(uri, data.split('<link rel="next" href="').pop().split('"')[0])
    print
    print count, 'tests executed,', len(failed), 'failed'
if __name__ == '__main__':
args = sys.argv[1:]
if args and args[0] == '--debug':
_debug = 1
args.pop(0)
if args:
uri = args[0]
else:
uri = 'http://diveintomark.org/'
if uri == 'test':
test()
else:
print "\n".join(getFeeds(uri))
|
Einsteinish/PyTune3
|
utils/feedfinder.py
|
Python
|
mit
| 13,462
|
[
"Brian"
] |
7fbafad897794f33d6a2879328b8676cdbab0029f49e760c1875675cf053aa67
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
from pyscf.pbc import gto as pgto
from pyscf.pbc import scf as pscf
import pyscf.pbc
from pyscf.pbc.df import rsdf
from pyscf.pbc.df import rsdf_jk, df_jk
#from mpi4pyscf.pbc.df import df
#from mpi4pyscf.pbc.df import df_jk
pyscf.pbc.DEBUG = False
L = 5.
n = 11
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])
cell.mesh = numpy.array([n, n, n])
cell.atom = '''C 3. 2. 3.
C 1. 1. 1.'''
cell.basis = 'ccpvdz'
cell.verbose = 0
cell.max_memory = 0
cell.rcut = 28.3458918685
cell.build()
cell0 = pgto.Cell()
cell0.a = numpy.eye(3) * L
cell0.atom = '''C 3. 2. 3.
C 1. 1. 1.'''
cell0.basis = 'sto-3g'
cell0.verbose = 0
cell0.build()
def tearDownModule():
    # Release the module-level fixture cells once unittest finishes with
    # this module, so their memory is reclaimed.
    global cell, cell0
    del cell, cell0
class KnownValues(unittest.TestCase):
def test_jk_single_kpt(self):
mf = cell0.RHF().rs_density_fit(auxbasis='weigend')
mf.with_df.mesh = [n, n, n]
mf.with_df.omega = 0.3
mf.with_df.exp_to_discard = 0.3
dm = mf.get_init_guess()
vj, vk = mf.get_jk(cell0, dm)
ej1 = numpy.einsum('ij,ji->', vj, dm)
ek1 = numpy.einsum('ij,ji->', vk, dm)
j_ref = 50.52980612772263 # rsjk result
k_ref = 38.84221371860046 # rsjk result
self.assertAlmostEqual(ej1, j_ref, 2)
self.assertAlmostEqual(ek1, k_ref, 2)
self.assertAlmostEqual(ej1, 50.5281508168606592, 7)
self.assertAlmostEqual(ek1, 38.8381202228168902, 7)
numpy.random.seed(12)
nao = cell0.nao_nr()
dm = numpy.random.random((nao,nao))
dm = dm + dm.T
vj1, vk1 = mf.get_jk(cell0, dm, hermi=0)
ej1 = numpy.einsum('ij,ji->', vj1, dm)
ek1 = numpy.einsum('ij,ji->', vk1, dm)
self.assertAlmostEqual(ej1, 25.8129854396903085, 7)
self.assertAlmostEqual(ek1, 72.6088517627853207, 7)
def test_jk_single_kpt_high_cost(self):
mf0 = pscf.RHF(cell)
mf0.exxdiv = None
mf = rsdf_jk.density_fit(mf0, auxbasis='weigend', mesh=(11,)*3)
mf.with_df.mesh = cell.mesh
mf.with_df.omega = 0.3
mf.with_df.exp_to_discard = 0.3
dm = mf.get_init_guess()
vj, vk = mf.get_jk(cell, dm)
ej1 = numpy.einsum('ij,ji->', vj, dm)
ek1 = numpy.einsum('ij,ji->', vk, dm)
j_ref = 48.283789539266174 # rsjk result
k_ref = 32.30441176447805 # rsjk result
self.assertAlmostEqual(ej1, j_ref, 4)
self.assertAlmostEqual(ek1, k_ref, 2)
self.assertAlmostEqual(ej1, 48.2837455394308037, 7)
self.assertAlmostEqual(ek1, 32.3026087105977950, 7)
numpy.random.seed(12)
nao = cell.nao_nr()
dm = numpy.random.random((nao,nao))
dm = dm + dm.T
vj1, vk1 = mf.get_jk(cell, dm, hermi=0)
ej1 = numpy.einsum('ij,ji->', vj1, dm)
ek1 = numpy.einsum('ij,ji->', vk1, dm)
self.assertAlmostEqual(ej1, 242.0467816643269714, 7)
self.assertAlmostEqual(ek1, 280.1593488661793572, 7)
numpy.random.seed(1)
kpt = numpy.random.random(3)
mydf = rsdf.RSDF(cell, [kpt]).set(auxbasis='weigend')
mydf.linear_dep_threshold = 1e-7
mydf.omega = 0.3
mydf.exp_to_discard = 0.3
vj, vk = mydf.get_jk(dm, 1, kpt, exxdiv=None)
ej1 = numpy.einsum('ij,ji->', vj, dm)
ek1 = numpy.einsum('ij,ji->', vk, dm)
self.assertAlmostEqual(ej1, 241.1512182675005249+0j, 7)
self.assertAlmostEqual(ek1, 279.6464915858919085+0j, 7)
vj, vk = mydf.get_jk(dm, 1, kpt, with_j=False, exxdiv='ewald')
ek1 = numpy.einsum('ij,ji->', vk, dm)
self.assertAlmostEqual(ek1, 691.6462442086188958+0j, 6)
def test_jk_hermi0(self):
numpy.random.seed(12)
nao = cell0.nao_nr()
dm = numpy.random.random((nao,nao))
dm = dm + dm.T
dm[:2,-3:] *= .5
jkdf = rsdf.RSDF(cell0).set(auxbasis='weigend')
jkdf.linear_dep_threshold = 1e-7
jkdf.omega = 0.3
jkdf.exp_to_discard = 0.3
vj0, vk0 = jkdf.get_jk(dm, hermi=0, exxdiv=None)
ej0 = numpy.einsum('ij,ji->', vj0, dm)
ek0 = numpy.einsum('ij,ji->', vk0, dm)
self.assertAlmostEqual(ej0, 25.7750081387043, 7)
self.assertAlmostEqual(ek0, 30.8140235220774, 7)
def test_jk_hermi0_high_cost(self):
numpy.random.seed(12)
nao = cell.nao_nr()
dm = numpy.random.random((nao,nao))
dm = dm + dm.T
dm[:2,-3:] *= .5
jkdf = rsdf.RSDF(cell).set(auxbasis='weigend')
jkdf.linear_dep_threshold = 1e-7
jkdf.omega = 0.3
jkdf.exp_to_discard = 0.3
vj0, vk0 = jkdf.get_jk(dm, hermi=0, exxdiv=None)
ej0 = numpy.einsum('ij,ji->', vj0, dm)
ek0 = numpy.einsum('ij,ji->', vk0, dm)
self.assertAlmostEqual(ej0, 242.0415113546338546, 7)
self.assertAlmostEqual(ek0, 280.5844313219625974, 7)
def test_j_kpts(self):
numpy.random.seed(1)
nao = cell0.nao_nr()
dm = numpy.random.random((4,nao,nao))
dm = dm + dm.transpose(0,2,1)
mydf = rsdf.RSDF(cell0).set(auxbasis='weigend')
mydf.linear_dep_threshold = 1e-7
mydf.kpts = numpy.random.random((4,3))
mydf.auxbasis = 'weigend'
mydf.omega = 0.3
mydf.exp_to_discard = 0.3
vj = df_jk.get_j_kpts(mydf, dm, 1, mydf.kpts)
self.assertAlmostEqual(lib.fp(vj[0]), (7.240207870630442-0.001010622364950332j) , 7)
self.assertAlmostEqual(lib.fp(vj[1]), (7.248745538469966-0.001562604522803734j) , 7)
self.assertAlmostEqual(lib.fp(vj[2]), (7.241193241602369-0.002518439407055759j) , 7)
self.assertAlmostEqual(lib.fp(vj[3]), (7.2403591406956185+0.001475803952777666j), 7)
def test_j_kpts_high_cost(self):
numpy.random.seed(1)
nao = cell.nao_nr()
dm = numpy.random.random((4,nao,nao))
dm = dm + dm.transpose(0,2,1)
mydf = rsdf.RSDF(cell).set(auxbasis='weigend')
mydf.linear_dep_threshold = 1e-7
mydf.kpts = numpy.random.random((4,3))
mydf.auxbasis = 'weigend'
mydf.omega = 0.3
mydf.exp_to_discard = 0.3
vj = df_jk.get_j_kpts(mydf, dm, 1, mydf.kpts)
self.assertAlmostEqual(lib.fp(vj[0]), (0.4917612920404451 + -0.1189108415838486j), 7)
self.assertAlmostEqual(lib.fp(vj[1]), (0.5490079977477804 + -0.0460035459549861j), 7)
self.assertAlmostEqual(lib.fp(vj[2]), (0.5364805888399165 + -0.0835075280950256j), 7)
self.assertAlmostEqual(lib.fp(vj[3]), (0.5489645342271054 + 0.0076957400601779j), 7)
def test_k_kpts(self):
numpy.random.seed(1)
nao = cell0.nao_nr()
dm = numpy.random.random((4,nao,nao))
dm = dm + dm.transpose(0,2,1)
mydf = rsdf.RSDF(cell0).set(auxbasis='weigend')
mydf.linear_dep_threshold = 1e-7
mydf.kpts = numpy.random.random((4,3))
mydf.exxdiv = None
mydf.omega = 0.3
mydf.exp_to_discard = 0.3
mydf.auxbasis = 'weigend'
vk = df_jk.get_k_kpts(mydf, dm, 0, mydf.kpts)
self.assertAlmostEqual(lib.fp(vk[0]), (4.831027586092549-0.12376435978940196j) , 7)
self.assertAlmostEqual(lib.fp(vk[1]), (4.783208264204395-0.00585421470169705j) , 7)
self.assertAlmostEqual(lib.fp(vk[2]), (4.823839360632854+0.002511545727704362j), 7)
self.assertAlmostEqual(lib.fp(vk[3]), (4.833891390413435+0.0208696082684768j) , 7)
def test_k_kpts_high_cost(self):
    """Exchange (K) build with RSDF on the large cell at 4 random k-points."""
    numpy.random.seed(1)
    nao = cell.nao_nr()
    dm = numpy.random.random((4, nao, nao))
    dm = dm + dm.transpose(0, 2, 1)
    mydf = rsdf.RSDF(cell).set(auxbasis='weigend')
    mydf.linear_dep_threshold = 1e-7
    mydf.kpts = numpy.random.random((4, 3))
    mydf.exxdiv = None
    mydf.omega = 0.3
    mydf.exp_to_discard = 0.3
    mydf.auxbasis = 'weigend'
    vk = df_jk.get_k_kpts(mydf, dm, 0, mydf.kpts)
    refs = ((-2.8332378458006682 - 1.0578692394119324j),
            (-7.4404313581193380 + 0.1023364493364826j),
            (-2.5718854219888430 - 1.4487422365382123j),
            (-0.7922307287610381 + 0.0116940681352038j))
    for idx, ref in enumerate(refs):
        self.assertAlmostEqual(lib.fp(vk[idx]), ref, 7)
def test_k_kpts_1(self):
    """Exchange (K) build over the 8 corners of the shifted Monkhorst mesh
    for a minimal two-atom He cell (non-hermitian density matrices)."""
    cell = pgto.Cell()
    cell.atom = 'He 1. .5 .5; He .1 1.3 2.1'
    cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
    cell.a = numpy.eye(3) * 2.5
    cell.mesh = [11] * 3
    cell.build()
    # all +/-0.25 sign combinations, x varying slowest
    scaled_kpts = [[sx, sy, sz]
                   for sx in (-.25, .25)
                   for sy in (-.25, .25)
                   for sz in (-.25, .25)]
    kpts = cell.get_abs_kpts(scaled_kpts)
    numpy.random.seed(1)
    nao = cell.nao_nr()
    dm = numpy.random.random((8, nao, nao))
    mydf = rsdf.RSDF(cell).set(auxbasis='weigend')
    mydf.linear_dep_threshold = 1e-7
    mydf.kpts = kpts
    mydf.auxbasis = {'He': [(0, (4.096, 1)), (0, (2.56, 1)), (0, (1.6, 1)), (0, (1., 1))]}
    mydf.exxdiv = None
    mydf.omega = 0.3
    mydf.exp_to_discard = 0.3
    vk = df_jk.get_k_kpts(mydf, dm, 0, mydf.kpts)
    refs = ((0.54220010040518218 - 0.00787204295681934j),
            (0.35987105007103914 + 0.0036047438452865574j),
            (0.46287057223452965 - 0.0065045318150024475j),
            (0.63677390788341914 + 0.0075132081533213447j),
            (0.53680188658523353 - 0.0076414750780774933j),
            (0.49613855046499666 + 0.0060603767383680838j),
            (0.45430752211150049 - 0.0068611602260866128j),
            (0.41856931218763038 + 0.0051073315205987522j))
    for idx, ref in enumerate(refs):
        self.assertAlmostEqual(lib.fp(vk[idx]), ref, 7)
def test_k_kpts_2(self):
    """Exchange (K) build over 8 k-points with hermitian (symmetrized)
    density matrices on the minimal two-atom He cell."""
    cell = pgto.Cell()
    cell.atom = 'He 1. .5 .5; He .1 1.3 2.1'
    cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
    cell.a = numpy.eye(3) * 2.5
    cell.mesh = [11] * 3
    cell.build()
    # all +/-0.25 sign combinations, x varying slowest
    scaled_kpts = [[sx, sy, sz]
                   for sx in (-.25, .25)
                   for sy in (-.25, .25)
                   for sz in (-.25, .25)]
    kpts = cell.get_abs_kpts(scaled_kpts)
    mydf = rsdf.RSDF(cell).set(auxbasis='weigend')
    mydf.linear_dep_threshold = 1e-7
    mydf.kpts = kpts
    mydf.auxbasis = {'He': [(0, (4.096, 1)), (0, (2.56, 1)), (0, (1.6, 1)), (0, (1., 1))]}
    mydf.exxdiv = None
    mydf.omega = 0.3
    mydf.exp_to_discard = 0.3
    nao = cell.nao_nr()
    numpy.random.seed(1)
    dm = numpy.random.random((8, nao, nao))
    dm = dm + dm.transpose(0, 2, 1)
    vk = df_jk.get_k_kpts(mydf, dm, 1, mydf.kpts)
    refs = ((1.0940331326660724 - 0.01474246983191657j),
            (0.72106828546205248 + 0.008683360062569572j),
            (0.89868267009698988 - 0.011091489111877838j),
            (1.2604941401190835 + 0.015979544115384041j),
            (1.0492129520812594 - 0.012424653667344821j),
            (0.99271107721956797 + 0.012696925711370165j),
            (0.92184754518871648 - 0.012035727588110348j),
            (0.8518483148628242 + 0.010084767506077213j))
    for idx, ref in enumerate(refs):
        self.assertAlmostEqual(lib.fp(vk[idx]), ref, 7)
# Allow the test module to be run directly from the command line.
if __name__ == '__main__':
    print("Full Tests for rsdf_jk")
    unittest.main()
|
sunqm/pyscf
|
pyscf/pbc/df/test/test_rsdf_jk.py
|
Python
|
apache-2.0
| 12,887
|
[
"PySCF"
] |
335133911b3d4a941c68d307907cd93c1d8b8478136c25ebe900a384ef53e79a
|
import os
import Rappture
from Rappture.tools import executeCommand as RapptureExec
import shutil
import sys
import re
import stat
import tempfile
import string
import threading
import time
import math
import zipfile
import numpy as np
from math import cos, sin
# Template for a DDSCAT v7.3 'ddscat.par' control file.  Every {NAME}
# placeholder is filled via str.format() before the file is written.
# The quoting and line layout are fixed by DDSCAT's positional parser --
# do not reflow, reorder, or re-quote lines.
param_file_template = """' ========= Parameter file for v7.3 ==================='
'**** Preliminaries ****'
'{CMDTRQ}' = CMTORQ*6 (DOTORQ, NOTORQ) -- either do or skip torque calculations
'{CMDSOL}' = CMDSOL*6 (PBCGS2, PBCGST, GPBICG, QMRCCG, PETRKP) -- CCG method
'{CMDFFT}' = CMETHD*6 (GPFAFT, FFTMKL) -- FFT method
'{CALPHA}' = CALPHA*6 (GKDLDR, LATTDR, FLTRCD) -- DDA method
'{CBINFLAG}' = CBINFLAG (NOTBIN, ORIBIN, ALLBIN) -- specify binary output
'**** Initial Memory Allocation ****'
{dipole_dim1} {dipole_dim2} {dipole_dim3} = dimensioning allowance for target generation
'**** Target Geometry and Composition ****'
'{CSHAPE}' = CSHAPE*9 shape directive
{SHPAR1} {SHPAR2} {SHPAR3} {SHPAR4} {SHPAR5} {SHPAR6} {SHPAR7} = shape parameters 1 - 3
{NCOMP} {NCOMP1} {NCOMP2} {NCOMP3} = NCOMP = number of dielectric materials
{materials1}
{materials2}
{materials3}
{materials4}
{materials5}
{materials6}
{materials7}
{materials8}
{materials9}
'**** Additional Nearfield calculation? ****'
{NRFLD} = NRFLD (=0 to skip nearfield calc., =1 to calculate nearfield E)
{NRFLD_r1} {NRFLD_r2} {NRFLD_r3} {NRFLD_r4} {NRFLD_r5} {NRFLD_r6} (fract. extens. of calc. vol. in -x,+x,-y,+y,-z,+z)
'**** Error Tolerance ****'
{TOL} = TOL = MAX ALLOWED (NORM OF |G>=AC|E>-ACA|X>)/(NORM OF AC|E>)
'**** maximum number of iterations allowed ****'
{MXITER} = MXITER
'**** Interaction cutoff parameter for PBC calculations ****'
{GAMMA} = GAMMA (1e-2 is normal, 3e-3 for greater accuracy)
'**** Angular resolution for calculation of <cos>, etc. ****'
{ETASCA} = ETASCA (number of angles is proportional to [(3+x)/ETASCA]^2 )
'**** Vacuum wavelengths (micron) ****'
{WAVINI} {WAVEND} {NWAV} '{WCDIVID}' = wavelengths (first,last,how many,how=LIN,INV,LOG)
'**** Refractive index of ambient medium'
{NAMBIENT} = NAMBIENT
'**** Effective Radii (micron) **** '
{AEFFINI} {AEFFEND} {NRAD} '{RCDIVID}' = aeff (first,last,how many,how=LIN,INV,LOG)
'**** Define Incident Polarizations ****'
({X1},{X2}) ({Y1},{Y2}) ({Z1},{Z2}) = Polarization state e01 (k along x axis)
{IORTH} = IORTH (=1 to do only pol. state e01; =2 to also do orth. pol. state)
'**** Specify which output files to write ****'
{IWRKSC} = IWRKSC (=0 to suppress, =1 to write ".sca" file for each target orient.
'**** Prescribe Target Rotations ****'
{BETA} {BETA} 1 = BETAMI, BETAMX, NBETA (beta=rotation around a1)
{THET} {THET} 1 = THETMI, THETMX, NTHETA (theta=angle between a1 and k)
{PHI} {PHI} 1 = PHIMIN, PHIMAX, NPHI (phi=rotation angle of a1 around k)
'**** Specify first IWAV, IRAD, IORI (normally 0 0 0) ****'
0 0 0 = first IWAV, first IRAD, first IORI (0 0 0 to begin fresh)
'**** Select Elements of S_ij Matrix to Print ****'
6 = NSMELTS = number of elements of S_ij to print (not more than 9)
11 12 21 22 31 41 = indices ij of elements to print
'**** Specify Scattered Directions ****'
{FRAME_TYPE} = CMDFRM (LFRAME, TFRAME for Lab Frame or Target Frame)
{NPLANES}{NPLANE_TEXT}
{PLANE1}
{PLANE2}
{PERIODIC_SCATTERING_ORDERS}
"""
def log(msg):
    """Append *msg* to the Rappture output log.

    Relies on a global ``driver`` (Rappture I/O handle, created in the
    main section); silently does nothing when ``driver`` is not defined
    (e.g. when helpers are imported outside a Rappture session).
    """
    try:
        driver.put('output.log(output_log)', msg, append=True)
    except NameError:
        pass
def find_all(name, path):
    """Yield the full path of every file named *name* found under *path*.

    Walks the tree top-down with os.walk, yielding one path per directory
    that contains a file with that exact basename.
    """
    for dirpath, _subdirs, filenames in os.walk(path):
        if name in filenames:
            yield os.path.join(dirpath, name)
def check_lightshuttle():
    """Find the incident light point that was saved in Blender.
    Returns the rotation values that need to be implemented prior to any manually DDSCAT-set rotations.

    Searches the current working directory tree for a file whose name
    starts with "PolarLight"; its first two lines supply the lab-frame
    x and y axis vectors (columns 1-3 of each line).  When no such file
    is found, the unrotated lab axes are used.
    Returns (brotX, brotY, brotZ) in degrees.
    """
    # NOTE(review): sessionnum is computed but never used.
    sessionnum = os.getcwd().split('/')[-1]
    for root, _, files in os.walk(os.getcwd()):
        for fil in files:
            if fil.startswith("PolarLight") == True:
                your_shuttle = os.path.join(root, fil)
                with open(your_shuttle, 'r') as blend_file:
                    input_data1 = blend_file.readline()
                    input_data2 = blend_file.readline()
                # columns 1-3 of each line are the vector components
                xLF = [float(input_data1.split()[1]), float(input_data1.split()[2]), float(input_data1.split()[3])]
                yLF = [float(input_data2.split()[1]), float(input_data2.split()[2]), float(input_data2.split()[3])]
    # Fall back to the identity lab frame when no PolarLight file was found
    # (the locals() test detects whether the loop above ever assigned xLF/yLF).
    if 'xLF' not in locals():
        xLF = [1, 0, 0]
    if 'yLF' not in locals():
        yLF = [0, 1, 0]
    # could start using numpy arrays...
    # z axis completes the right-handed frame
    g = np.array(xLF)
    h = np.array(yLF)
    z = np.cross(g, h)
    zLF = z.tolist()
    # zLF = [ xLF[1]*yLF[2] - xLF[2]*yLF[1], xLF[2]*yLF[0] - xLF[0]*yLF[2], xLF[0]*yLF[1] - xLF[1]*yLF[0] ]
    # reference (unrotated) axes
    a1 = [1, 0, 0]
    a2 = [0, 1, 0]
    a3 = [0, 0, 1]
    # normalize the vectors being used
    magx = math.sqrt(sum(xLF[i]*xLF[i] for i in range(len(xLF))))
    xLF = [ xLF[i]/magx for i in range(len(xLF)) ]
    magy = math.sqrt(sum(yLF[i]*yLF[i] for i in range(len(yLF))))
    yLF = [ yLF[i]/magy for i in range(len(yLF)) ]
    magz = math.sqrt(sum(zLF[i]*zLF[i] for i in range(len(zLF))))
    zLF = [ zLF[i]/magz for i in range(len(zLF)) ]
    # angle between each saved axis and the corresponding reference axis
    dotX = sum(xLF[i]*a1[i] for i in range(len(xLF)))
    dotY = sum(yLF[i]*a2[i] for i in range(len(yLF)))
    dotZ = sum(zLF[i]*a3[i] for i in range(len(zLF)))
    brotX = math.degrees(math.acos(dotX))
    brotY = math.degrees(math.acos(dotY))
    brotZ = math.degrees(math.acos(dotZ))
    return brotX, brotY, brotZ
def memory_check(NAT):
    """
    Check whether generating a shape of NAT dipoles would overdraw the
    user's disk quota.

    The quota is queried by piping a ``getquota`` request through ``nc``
    to the fshome.nanohub.org quota service (port 301).

    Input:  NAT -- target size in number of dipoles.
    Output: (status, requested_MB, free_MB) where status is 1 when there
            is enough space and 0 otherwise.
    """
    user = os.getenv("USER", "nobody")
    with open('ddaUser', 'w') as userQuota:
        userQuota.write('getquota user={0}\n'.format(user))
    with open('ddaUser', 'r') as userQuota:
        returncode, quotaStdout, quotaStderr = RapptureExec(
            ['nc', '-w', '5', 'fshome.nanohub.org', '301'],
            stdin=userQuota, streamOutput=False)
    try:
        os.remove('ddaUser')
    except OSError:
        # narrowed from a bare except: only ignore a missing/busy scratch file
        pass
    outline = quotaStdout + quotaStderr
    # quota reply is comma-separated key=value pairs; values arrive in KB,
    # converted here to MB
    space_used = float((outline.split(',')[-3]).split('=')[-1]) / float(1024 * 1000)
    max_hardspace = float((outline.split(',')[-4]).split('=')[-1]) / float(1024 * 1000)
    free_mem = max_hardspace - space_used
    # Empirical cost of ~0.00115 MB per dipole.  1 MB is reserved to prevent
    # total hardspace usage; this reserve can be made larger if problems arise.
    mem_to_use = (float(NAT) * 0.00115) + 1
    check_space = free_mem - mem_to_use
    if check_space < 0:
        return 0, mem_to_use, free_mem
    return 1, mem_to_use, free_mem
def memory_check_filesize(fileSize):
    """
    Check whether writing fileSize bytes would overdraw the user's disk
    quota.

    The quota is queried by piping a ``getquota`` request through ``nc``
    to the fshome.nanohub.org quota service (port 301).

    Input:  fileSize in bytes.
    Output: (status, requested_MB, free_MB) where status is 1 when there
            is enough space and 0 otherwise.
    """
    user = os.getenv("USER", "nobody")
    with open('ddaUser', 'w') as userQuota:
        userQuota.write('getquota user={0}\n'.format(user))
    with open('ddaUser', 'r') as userQuota:
        returncode, quotaStdout, quotaStderr = RapptureExec(
            ['nc', '-w', '5', 'fshome.nanohub.org', '301'],
            stdin=userQuota, streamOutput=False)
    try:
        os.remove('ddaUser')
    except OSError:
        # narrowed from a bare except: only ignore a missing/busy scratch file
        pass
    outline = quotaStdout + quotaStderr
    # quota reply is comma-separated key=value pairs; values arrive in KB,
    # converted here to MB
    space_used = float((outline.split(',')[-3]).split('=')[-1]) / float(1024 * 1000)
    max_hardspace = float((outline.split(',')[-4]).split('=')[-1]) / float(1024 * 1000)
    free_mem = max_hardspace - space_used
    # 1 MB is reserved to prevent total hardspace usage.  This reserve can be
    # made larger if problems still arise.
    mem_to_use = ((fileSize / 1024) / 1024) + 1
    check_space = free_mem - mem_to_use
    if check_space < 0:
        return 0, mem_to_use, free_mem
    return 1, mem_to_use, free_mem
def get_mem(NAT, Field_status):
    """
    Estimate the memory (MB) needed for the current settings and pick the
    matching execution venue.

    Based on the number of dipoles NAT and whether the nearfield
    calculation is enabled (Field_status != '0').
    Returns (memory_usage_MB, venue_name); venue_name is
    'invalid_problem_size' when the estimate exceeds the largest tier.
    """
    if Field_status != '0':
        # nearfield on: heavier per-dipole footprint
        memory_usage = float(NAT) * 0.016 + 42
    else:
        memory_usage = float(NAT) * 0.002 + 42
    # venue tiers ordered by ascending memory ceiling (MB)
    tiers = ((16000, 'rcac_S'),
             (32000, 'rcac_M'),
             (48000, 'rcac_L'),
             (64000, 'rcac_XL'),
             (128000, 'rcac_XXL'),
             (192000, 'rcac_XXXL'))
    venue_name = 'invalid_problem_size'
    for ceiling, candidate in tiers:
        if memory_usage <= ceiling:
            venue_name = candidate
            break
    return memory_usage, venue_name
def data_NF235_LIST(par1, par2, par3):
    """
    Round each requested grid dimension up to the next size DDSCAT's FFT
    accepts (sizes whose only prime factors are 2, 3 and 5, up to 4096).

    Essentially a recalculation of the X,Y,Z dimensions of the shape for
    memory-allocation purposes.

    Inputs: par1, par2, par3 -- requested X/Y/Z extents (dipoles).
    Returns the (MXNX, MXNY, MXNZ) values needed for memory allocation.
    Raises IndexError when a dimension exceeds 4096 (the original
    linear-scan implementation also failed for such inputs).
    """
    import bisect  # local import: only needed here

    NF235_list = [1,2,3,4,5,6,8,9,10,12,15,16,18,20,24,25,27,
    30,32,36,40,45,48,50,54,60,64,72,75,80,81,90,96,100,
    108,120,125,128,135,144,150,160,162,180,192,200,216,225,
    240,243,250,256,270,288,300,320,324,360,375,384,400,405,
    432,450,480,486,500,512,540,576,600,625,640,648,675,720,
    729,750,768,800,810,864,900,960,972,1000,1024,1080,1125,
    1152,1200,1215,1250,1280,1296,1350,1440,1458,1500,1536,
    1600,1620,1728,1800,1875,1920,1944,2000,2025,2048,2160,
    2187,2250,2304,2400,2430,2500,2560,2592,2700,2880,2916,
    3000,3072,3125,3200,3240,3375,3456,3600,3645,3750,3840,
    3888,4000,4050,4096]

    def _round_up(value):
        # smallest allowed size >= value (the list is sorted, so a binary
        # search replaces the original three duplicated linear scans)
        return NF235_list[bisect.bisect_left(NF235_list, value)]

    return _round_up(par1), _round_up(par2), _round_up(par3)
def remove_all_w(num_jobs):
    """
    Delete every per-wavelength DDSCAT output file (w0**...) for jobs
    0..num_jobs-1 in the current directory; each wavelength can generate
    a fresh w0** file of each type, so stale ones are cleared first.
    """
    suffixes = ('r000k000.sca', 'r000.avg', 'r000k000.fml',
                'r000k000.E1', 'r000k000.E2',
                'r000k000.pol1', 'r000k000.pol2',
                'r000k000.EB1', 'r000k000.EB2')
    for job in range(num_jobs):
        prefix = 'w{0}'.format(str(job).zfill(3))
        for suffix in suffixes:
            target = prefix + suffix
            if os.path.exists(target):
                os.remove(target)
def find_stderr(sign, path):
    """
    Find all the files with the specified tag .stderr in the path.
    Returns (respectively):
    A formatted log of errors found,
    a concatenated list of outputs,
    a formatted list of outputs.

    NOTE(review): the `sign` parameter is never used.
    Files are ordered by the single wavelength announced inside each
    .stderr file ("1 wavelengths from X to X"); the matching .stdout
    file is read for the per-wavelength log sections.
    """
    output = ''
    stdout_list_log = ''
    stdout_list_cat = ''
    # backreference \1 requires the "from" and "to" values to be identical,
    # i.e. the file covers exactly one wavelength
    wave_regex = re.compile(r'1 wavelengths from\s+(.+) to\s+\1')
    wavelength_vals = []
    avoid_dupes = []
    avoid_dupes2 = []
    # collect (wavelength, stderr-path) pairs from every .stderr file
    for root, _, files in os.walk(path):
        for fil in files:
            if fil.endswith(".stderr") == True:
                with open(os.path.join(root, fil), 'r') as output_file:
                    for line in output_file:
                        wave_match = wave_regex.search(line)
                        if wave_match is not None:
                            wavelength = float(wave_match.group(1))
                            wavelength_vals.append((wavelength, os.path.join(root, fil)))
    # process files in ascending wavelength order
    wavelength_vals.sort(key=lambda x: x[0])
    # pass 1: concatenate each unique .stderr file's contents
    for wavelength, filepather in wavelength_vals:
        with open(filepather, 'r') as output_filer:
            if filepather not in avoid_dupes:
                output += output_filer.read()
                output += '\n +++ Next Output File +++ \n'
                avoid_dupes.append(filepather)
    # pass 2: read the sibling .stdout of each unique file, both as a
    # wavelength-labelled log and as a plain concatenation
    for wavelength, filepather in wavelength_vals:
        replacedfilepath = '{0}'.format(filepather.replace('.stderr', '.stdout'))
        with open(replacedfilepath, 'r') as output_filer2:
            if replacedfilepath not in avoid_dupes2:
                output_red = output_filer2.read()
                stdout_list_log += '\n Wavelength (um): {0} \n'.format(wavelength)
                stdout_list_log += output_red
                stdout_list_cat += output_red
                stdout_list_log += '\n +++ Next Error File +++ \n'
                avoid_dupes2.append(replacedfilepath)
    return stdout_list_log, stdout_list_cat, output
def find_regex(regex, path):
    """Yield the path of every file under *path* whose basename matches
    *regex* (anchored at the start of the name, via re.match)."""
    pattern = re.compile(regex)
    for dirpath, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            if pattern.match(filename) is not None:
                yield os.path.join(dirpath, filename)
def parse_table(filename):
    """Parse a DDSCAT output table.

    filename  The file name of the input table.
    Returns a tuple (header, data) where:
      header  A single string containing every header line.
      data    A list of the data rows (lines starting with a digit),
              as raw strings.
    """
    with open(filename, 'r') as table:
        table_lines = table.readlines()
    # Walk upward from the bottom to find the last header line: the first
    # line (scanning in reverse) that is non-empty and does not begin with
    # a digit marks the end of the header.
    split_at = len(table_lines) - 1
    for line in reversed(table_lines):
        if line != '' and line[0] not in string.digits:
            split_at += 1
            break
        split_at -= 1
    return ''.join(table_lines[:split_at]), table_lines[split_at:]
def parse_output_log():
    """Parse the Output being sent to the log
    Returns an error message if one is needed/found.
    Returns a '0' if no error is found.

    Scans the Rappture output log (via the global ``driver``) for
    'sigterm'/'forrtl' markers; once a marker is seen, every subsequent
    line is accumulated into the returned error message.
    """
    parse_this = driver.get('output.log(output_log)')
    list_this = parse_this.split('\n')
    error_msg_val = ''
    append_now = 0
    for line in list_this:
        if (append_now == 1):
            error_msg_val = error_msg_val + '\n' + line
        # NOTE(review): deliberately not an elif in the original -- a line
        # that both follows an earlier marker and itself matches is
        # appended twice; confirm this is intended.
        if (re.search('sigterm', line) or re.search('forrtl', line)):
            error_msg_val = error_msg_val + '\n' + line
            append_now = 1
    # NOTE(review): for an empty log append_now stays 0, so this message
    # is overwritten by the '0' return below and never reaches the caller.
    if (parse_this == ''):
        error_msg_val = 'No Output Log was Generated!'
    if (append_now == 1):
        return error_msg_val
    elif (append_now == 0):
        return '0'
def collate_table(output_name, partial_names, sort_slice):
    """Create a DDSCAT output table from a list of partial tables.
    output_name The file name of the output table.
    partial_names A list of table file names to merge.
    sort_slice In a fixed width table, the column range of the sort key.
    Returns Wavelength with Max Light Extinction.
    Also, secondarily handles selecting a maximum E-Field from a list of E-Fields when applicable.
    This is because the best time to catch such handling occurs when collating a qtable.

    NOTE(review): max_Qext is only assigned on the output_name == 'qtable'
    branch, so the final return raises NameError for any other table name
    -- callers appear to rely on this function only for 'qtable'; confirm.
    """
    with open(output_name, 'w') as table:
        header = None
        header2 = None
        data = []
        data2 = []
        Efield_path = "0"
        max_field_tuple = [(0, 0)]
        # merge all partial tables; the last header read wins
        for partial_name in partial_names:
            header, partial_data = parse_table(partial_name)
            data += partial_data
            data2.append((partial_data, partial_name))
        # sort merged rows by the fixed-width sort-key column range
        data.sort(key=lambda row: float(row[sort_slice]))
        table.write(header)
        for row in data:
            table.write(row)
    with open(output_name, 'r') as table:
        if (output_name == 'qtable'):
            b = []
            read_buffer = 0
            max_Qext_name = '1'
            # collect (wave, Q_ext) pairs from the rows after the header line
            for line in table:
                a = line
                if (read_buffer == 1):
                    b.append((a.split()[1], a.split()[2]))
                if (re.search('wave Q_ext', a)):
                    read_buffer = 1
            if b == []:
                b.append('0 1')
            try:
                max_Qext = max(b, key=lambda x: float(x[1]))
            except ValueError:
                max_Qext = ('0 1')
            # locate which partial table produced the max-Q_ext wavelength
            for item, key in data2:
                try:
                    if (re.search('{0}'.format(max_Qext[0]), ('{0}'.format(item[0])).split()[1])):
                        max_Qext_name = '{0}'.format(key)
                except IndexError:
                    1
            Efield_path = max_Qext_name
            # promote that run's E/B nearfield files to the working directory
            working_path = os.path.join(os.getcwd(), 'w000r000k000.E1')
            working_pathB = os.path.join(os.getcwd(), 'w000r000k000.EB1')
            Efield_path = os.path.join(max_Qext_name.split('/qtable')[0], 'w000r000k000.E1')
            EBfield_path = os.path.join(max_Qext_name.split('/qtable')[0], 'w000r000k000.EB1')
            if os.path.exists(Efield_path):
                os.rename(Efield_path, working_path)
            if os.path.exists(EBfield_path):
                os.rename(EBfield_path, working_pathB)
    return max_Qext[0]
def local_maxqext_grab(output_name, partial_names, sort_slice, bfield):
    """
    Returns Wavelength with Max Light Extinction for locally run simulations.
    Also, secondarily handles selecting a maximum E-Field from a list of E-Fields when applicable.
    This is because the best time to catch such handling occurs when parsing the qtable.

    NOTE(review): output_name, partial_names, sort_slice and bfield are all
    unused -- the function always reads './qtable'.  save_count is computed
    and zero-padded but never used in the returned file names, which are
    hard-coded to w000r000k000.*; confirm whether the index was meant to be
    substituted in.
    """
    b = []
    read_buffer = 0
    max_Qext_name = '1'
    # collect (wave, Q_ext) pairs from the rows after the header line
    with open ('qtable', 'r') as table:
        for line in table:
            a = line
            if (read_buffer == 1):
                b.append((a.split()[1], a.split()[2]))
            if (re.search('wave Q_ext', a)):
                read_buffer = 1
    # NOTE(review): this fallback appends a plain string; the unpacking
    # loop below ('for item, key in b') would fail on it -- confirm.
    if b == []:
        b.append('0 1')
    max_Qext = max(b, key=lambda x: float(x[1]))
    # find the row index of the max-Q_ext wavelength
    count = 0
    save_count = 0
    for item, key in b:
        if item == max_Qext[0]:
            save_count = count
        count = count + 1
    # zero-pad the index to three digits (e.g. 7 -> '007')
    if len('{0}'.format(save_count)) == 1:
        save_count = '00{0}'.format(save_count)
    elif len('{0}'.format(save_count)) == 2:
        save_count = '0{0}'.format(save_count)
    Efield_to_use = 'w000r000k000.E1'
    EBfield_to_use = 'w000r000k000.EB1'
    return Efield_to_use, EBfield_to_use, max_Qext[0]
def BuildVTKfiles(RawDataFile, squareval, getSecret, gsX, gsY, gsZ):
    """Using the Raw data from DDSCAT Build the
    data files for the : E-field, log E-field, E-field Vectors,
    and if requested in addition: B-field, B-field Vectors, Poynting Vectors

    Writes (in the current working directory): EField_Vec, EField_VTK.vtk,
    EField_VTK_logscale.vtk and, when the B-field flag in the data is set,
    BField_Vec, Poynting_Vec and BField_VTK.vtk; plus secret_data when
    getSecret == "On".  Reads composition.txt for the composition field.

    NOTE(review): the expected line format in RawDataFile is inferred from
    the split('(') indexing below as
      x y z |E| (ExRe,ExIm) (EyRe,EyIm) (EzRe,EzIm) bon |B| (BxRe,BxIm)
      (ByRe,ByIm) (BzRe,BzIm) Px Py Pz
    -- confirm against the producer of this file.

    Returns (min_x, min_y, min_z, min_e, max_x, max_y, max_z, max_e,
    num_pts_x, num_pts_y, num_pts_z).
    """
    # NOTE(review): read_x/read_y/read_z are never used.
    read_x = 0
    read_y = 0
    read_z = 0
    # extrema sentinels; assumes all data within +/-999999999
    min_x = 999999999
    min_y = 999999999
    min_z = 999999999
    min_e = 999999999
    max_x = -999999999
    max_y = -999999999
    max_z = -999999999
    max_e = -999999999
    num_pts_x = 0
    num_pts_y = 0
    num_pts_z = 0
    # dicts used as ordered-unique coordinate sets (keys == values)
    exc = {}
    eyc = {}
    ezc = {}
    eec = []   # |E| per point
    eev = []   # E-field vector (real parts) per point
    bbc = []   # |B| per point
    bbv = []   # B-field vector (real parts) per point
    pv = []    # Poynting vector per point
    secretdata = []
    with open(RawDataFile, 'r') as getdata:
        for line in getdata:
            # skip the column-header line and blank lines
            if re.search('Xcoord', line) or (line == "") or (line == "\n"):
                pass
            elif re.search('Dimensions', line):
                # grid dimensions appear once as the last three fields
                num_pts_x = int(line.split()[-3])
                num_pts_y = int(line.split()[-2])
                num_pts_z = int(line.split()[-1])
            else:
                xval_in = round(float(line.split()[0]), 4)
                yval_in = round(float(line.split()[1]), 4)
                zval_in = round(float(line.split()[2]), 4)
                eval_in = round(float(line.split()[3]), 6)
                # NOTE(review): each *Ival_in below reuses index [0], so it
                # duplicates the real part instead of parsing the imaginary
                # part; these values are never used downstream.
                exRval_in = float(line.split('(')[1].split(',')[0])
                exIval_in = float(line.split('(')[1].split(',')[0].split(')')[0])
                eyRval_in = float(line.split('(')[2].split(',')[0])
                eyIval_in = float(line.split('(')[2].split(',')[0].split(')')[0])
                ezRval_in = float(line.split('(')[3].split(',')[0])
                ezIval_in = float(line.split('(')[3].split(',')[0].split(')')[0])
                # bon_in: flag telling whether B-field data is present
                bon_in = float(line.split('(')[3].split(',')[1].split(')')[1].split()[0])
                bval_in = float(line.split('(')[3].split(',')[1].split(')')[1].split()[1])
                bxRval_in = float(line.split('(')[4].split(',')[0])
                bxIval_in = float(line.split('(')[4].split(',')[0].split(')')[0])
                byRval_in = float(line.split('(')[5].split(',')[0])
                byIval_in = float(line.split('(')[5].split(',')[0].split(')')[0])
                bzRval_in = float(line.split('(')[6].split(',')[0])
                bzIval_in = float(line.split('(')[6].split(',')[0].split(')')[0])
                px_in = float(line.split('(')[6].split(',')[1].split(')')[1].split()[0])
                py_in = float(line.split('(')[6].split(',')[1].split(')')[1].split()[1])
                pz_in = float(line.split('(')[6].split(',')[1].split(')')[1].split()[2])
                exc[xval_in] = xval_in
                eyc[yval_in] = yval_in
                ezc[zval_in] = zval_in
                eec.append(eval_in)
                # "2" requests |component|^2 instead of the raw components
                if (squareval == "2"):
                    exRval_in = exRval_in**2
                    eyRval_in = eyRval_in**2
                    ezRval_in = ezRval_in**2
                    bxRval_in = bxRval_in**2
                    byRval_in = byRval_in**2
                    bzRval_in = bzRval_in**2
                    px_in = px_in**2
                    py_in = py_in**2
                    pz_in = pz_in**2
                eev.append((exRval_in, eyRval_in, ezRval_in))
                bbc.append(bval_in)
                bbv.append((bxRval_in, byRval_in, bzRval_in))
                pv.append((px_in, py_in, pz_in))
                # running extrema over coordinates and |E|
                if (xval_in < min_x):
                    min_x = xval_in
                if (xval_in > max_x):
                    max_x = xval_in
                if (yval_in < min_y):
                    min_y = yval_in
                if (yval_in > max_y):
                    max_y = yval_in
                if (zval_in < min_z):
                    min_z = zval_in
                if (zval_in > max_z):
                    max_z = zval_in
                if (eval_in < min_e):
                    min_e = eval_in
                if (eval_in > max_e):
                    max_e = eval_in
                # probe-point extraction: "-1000" disables an axis filter
                if getSecret == "On":
                    if gsX == "-1000":
                        setX = 0
                    else:
                        setX = 1
                    if gsY == "-1000":
                        setY = 0
                    else:
                        setY = 1
                    if gsZ == "-1000":
                        setZ = 0
                    else:
                        setZ = 1
                    if ((round(float(gsX), 4) == round(xval_in, 4)) and setX == 1):
                        wantX = 1
                    else:
                        wantX = 0
                    if ((round(float(gsY), 4) == round(yval_in, 4)) and setY == 1):
                        wantY = 1
                    else:
                        wantY = 0
                    if ((round(float(gsZ), 4) == round(zval_in, 4)) and setZ == 1):
                        wantZ = 1
                    else:
                        wantZ = 0
                    if (wantX == setX) and (wantY == setY) and (wantZ == setZ):
                        secretdata.append(line)
    # First, we'll write some of that vector data to some basic text for Rappture
    # Could be re-structured if there are too many lines coming from a large simulation
    # sorting the dicts yields the sorted unique coordinate values per axis
    exc = sorted(exc)
    eyc = sorted(eyc)
    ezc = sorted(ezc)
    if getSecret == "On":
        with open('secret_data', 'w') as sd:
            sd.write(' Xcoord Ycoord Zcoord EField EField-X(Re) EField-X(Im) EField-Y(Re) EField-Y(Im) EField-Z(Re) EField-Z(Im) B On/Off Bfield BField-X(Re) BField-X(Im) BField-Y(Re) BField-Y(Im) BField-Z(Re) BField-Z(Im) Poynting X Poynting Y Poynting Z \n')
            for item in secretdata:
                sd.write(item)
    with open('EField_Vec', 'w') as evecfile:
        for item in eev:
            evecfile.write('{0} {1} {2}\n'.format(item[0], item[1], item[2]))
    # bon_in holds the flag from the LAST data line parsed above
    if float(bon_in) == 1:
        with open('BField_Vec', 'w') as bvecfile:
            for item in bbv:
                bvecfile.write('{0} {1} {2}\n'.format(item[0], item[1], item[2]))
        with open('Poynting_Vec', 'w') as pvecfile:
            for item in pv:
                pvecfile.write('{0} {1} {2}\n'.format(item[0], item[1], item[2]))
    # Then write the VTKs
    # At least everything is sorted already!
    fdx = num_pts_x
    fdy = num_pts_y
    fdz = num_pts_z
    lc = 0  # per-line value counter: wrap output every 9 values
    with open('headerVTK', 'w') as vtkf:
        vtkf.write("# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET RECTILINEAR_GRID\n")
        vtkf.write("DIMENSIONS {0} {1} {2}\n".format(fdx, fdy, fdz))
        vtkf.write("X_COORDINATES {0} float\n".format(fdx))
        for item in exc:
            vtkf.write("{0} ".format(item))
            lc += 1
            if lc == 9:
                vtkf.write("\n")
                lc = 0
        vtkf.write("\n")
        lc = 0
        vtkf.write("Y_COORDINATES {0} float\n".format(fdy))
        for item in eyc:
            vtkf.write("{0} ".format(item))
            lc += 1
            if lc == 9:
                vtkf.write("\n")
                lc = 0
        vtkf.write("\n")
        lc = 0
        vtkf.write("Z_COORDINATES {0} float\n".format(fdz))
        for item in ezc:
            vtkf.write("{0} ".format(item))
            lc += 1
            if lc == 9:
                vtkf.write("\n")
                lc = 0
        vtkf.write("\n")
        lc = 0
        fdxyz = (int(float(fdx)*float(fdy)*float(fdz)))
        vtkf.write("POINT_DATA {0}\n".format(fdxyz))
    # Since we throw compositions into the regular E-field view as one...
    # We need to have a different ending to the header there (than B-field, log-field).
    shutil.copyfile('headerVTK', 'EField_VTK.vtk')
    shutil.copyfile('headerVTK', 'EField_VTK_logscale.vtk')
    if float(bon_in) == 1:
        shutil.copyfile('headerVTK', 'BField_VTK.vtk')
        with open('BField_VTK.vtk', 'a') as vtkf:
            vtkf.write('FIELD FieldData 1\n')
            vtkf.write('Intensity 1 {0} float\n'.format(fdxyz))
    with open('EField_VTK.vtk', 'a') as vtkf:
        vtkf.write('FIELD FieldData 2\n')
        vtkf.write('Intensity 1 {0} float\n'.format(fdxyz))
    with open('EField_VTK_logscale.vtk', 'a') as vtkf:
        vtkf.write('FIELD FieldData 1\n')
        vtkf.write('Intensity 1 {0} float\n'.format(fdxyz))
    with open('EField_VTK.vtk', 'a') as efile, open('EField_VTK_logscale.vtk', 'a') as elogfile:
        # NOTE(review): math.log(item) raises for a zero |E| value -- confirm
        # the nearfield dump never contains exact zeros.
        for item in eec:
            efile.write('{0} '.format(item))
            elogfile.write('{0} '.format(math.log(item)))
            lc += 1
            if lc == 9:
                efile.write("\n")
                elogfile.write("\n")
                lc = 0
        efile.write('\n')
        elogfile.write('\n')
        lc = 0
        # Note, skipping compositions for now!
        # It may be helpful to be loading E-fields without composition data...
        # ...so that we can load larger files and faster!
        #
        # Otherwise something like this will follow, but you need CompositionData still:
        #vtkf.write('FIELD FieldData 1\n')
        efile.write('Composition 1 {0} float\n'.format(fdxyz))
        # Read some composition data in
        with open('composition.txt', 'r') as compo:
            lc = 0
            CompositionData = compo.read()
            for item in CompositionData.split():
                try:
                    efile.write('{0} '.format(int(float(item))))
                except ValueError:
                    pass
                lc += 1
                if lc == 9:
                    efile.write("\n")
                    lc = 0
            efile.write('\n')
            lc = 0
    if os.path.exists('BField_VTK.vtk'):
        with open('BField_VTK.vtk', 'a') as bfile:
            for item in bbc:
                bfile.write('{0} '.format(item))
                lc += 1
                if lc == 9:
                    bfile.write("\n")
                    lc = 0
            bfile.write('\n')
    # Again, could also potentially place some Composition data in the B-Field plot at this point.
    os.remove('headerVTK')
    return min_x, min_y, min_z, min_e, max_x, max_y, max_z, max_e, num_pts_x, num_pts_y, num_pts_z
def get_timing_info(output_string):
    """Get the total time taken to run a DDSCAT job from a string containing
    its stderr output. Also return the wavelength processed if the file
    only contains one.
    Returns (wavelength, total time).
    """
    # fractional ("12.345") and whole ("12.") CPU-time report formats
    cpu_frac = re.compile(r'(\d+\.\d{3}) (= CPU time)')
    cpu_whole = re.compile(r'(\d+\.) (= CPU time)')
    wave_pat = re.compile(r' >DDSCAT \s+(.+) = WAVE =')
    elapsed = 0.0
    wavelength = None
    for text_line in output_string.split('\n'):
        match = cpu_frac.search(text_line)
        if match is not None:
            elapsed += float(match.group(1))
        match = cpu_whole.search(text_line)
        if match is not None:
            elapsed += float(match.group(1))
        match = wave_pat.search(text_line)
        if match is not None:
            wavelength = float(match.group(1))
    return wavelength, elapsed
def get_timing_info_local(output_string):
    """Get the total time taken to run a DDSCAT job from a string containing
    its stderr output. Also return the wavelength processed if the file
    only contains one.
    Returns (wavelength, total time) array.

    Unlike get_timing_info, this handles a log covering several
    wavelengths: buffer2 counts WAVE headers seen and buffer1 counts CPU
    time entries since the last flush; when a new wavelength starts after
    times were accumulated, the previous (wavelength, time) pair is
    flushed to the result list.
    """
    total_time = 0.0
    wavelength = None
    save_wavelength = -1.0
    wavelength_times = []
    buffer1 = 0  # CPU-time entries seen since last flush
    buffer2 = 0  # WAVE headers seen since last flush
    # fractional ("12.345") and whole ("12.") CPU-time report formats
    time_regex = re.compile(r'(\d+\.\d{3}) (= CPU time)')
    time_regex2 = re.compile(r'(\d+\.) (= CPU time)')
    wave_regex = re.compile(r' >DDSCAT \s+(.+) = WAVE =')
    for line in output_string.split('\n'):
        wave_match = wave_regex.search(line)
        if wave_match is not None:
            wavelength = float(wave_match.group(1))
            buffer2 = buffer2 + 1
        time_match = time_regex.search(line)
        if time_match is not None:
            save_wavelength = wavelength
            total_time += float(time_match.group(1))
            buffer1 = buffer1 + 1
        time_match2 = time_regex2.search(line)
        if time_match2 is not None:
            save_wavelength = wavelength
            total_time += float(time_match2.group(1))
            buffer1 = buffer1 + 1
        # a second WAVE header after accumulating times => previous
        # wavelength finished; flush its total and restart the counters
        if (buffer1 != 0) and (buffer2 == 2):
            buffer1 = 0
            buffer2 = 1
            wavelength_times.append((save_wavelength, total_time))
            total_time = 0
    # flush the final (possibly only) wavelength
    wavelength_times.append((save_wavelength, total_time))
    return wavelength_times
def gen_custom_diels(diel_num_list):
    """ Given that custom constant dielectrics are expected, write and name the files appropriately
    Inputs:
    Tuple list of dieletric file number expected (i.e. 1,3,5,7 for diel1, diel3, diel5, diel7)
    and list of values used to build the corresponding files.
    Generates:
    Dielectric constant value files for every item in diel_num_list.
    Gives same dielectric file names as uploaded dielectric files.
    """
    for diel_num, refr_index in diel_num_list:
        target = 'custom_dielectric{0}'.format(diel_num)
        # only (re)generate placeholder files that are still empty
        if os.path.getsize(target) != 0:
            continue
        with open(target, 'w') as cust_diel:
            cust_diel.write("Internally Generated Constant Refractive Index for Dielectric{0}\n".format(diel_num))
            cust_diel.write("1 2 3 0 0 0 = columns for wave, Re(n), Im(n), eps1, eps2\n")
            # constant index tabulated over 0.001-1.000 um in 1 nm steps
            for wave_entry in [x/float(1000) for x in range(1, 1001)]:
                cust_diel.write("{0} {1} 1.000E-6\n".format(wave_entry, refr_index))
# Nanobio node
# Courtesy of Nahil Sobh, University of Illinois
def rotate3D(theta_X, theta_Y, theta_Z):
    """
    ========================================
    Rotates the Coordinates Axes given the:
    1- Rotation Around X denoted by Theta_X
    2- Rotation Around Y denoted by Theta_Y
    3- Rotation Around Z denoted by Theta_Z
    ========================================

    Angles are given in degrees.  The combined rotation is
    Rxyz = Rz . Ry . Rx.  Returns (phi, beta, theta) in degrees,
    derived from the entries of Rxyz via arctan2.
    """
    theta_X = np.radians(theta_X)
    theta_Y = np.radians(theta_Y)
    theta_Z = np.radians(theta_Z)
    Sx = np.sin(theta_X)
    Cx = np.cos(theta_X)
    Sy = np.sin(theta_Y)
    Cy = np.cos(theta_Y)
    Sz = np.sin(theta_Z)
    Cz = np.cos(theta_Z)
    # dtype=float: the np.float alias was deprecated in NumPy 1.20 and
    # removed in 1.24 (it was only an alias for the builtin float).
    Rx = np.array([[ 1,   0,   0],
                   [ 0,  Cx, -Sx],
                   [ 0,  Sx,  Cx]], dtype=float)
    Ry = np.array([[ Cy,  0,  Sy],
                   [  0,  1,   0],
                   [-Sy,  0,  Cy]], dtype=float)
    Rz = np.array([[ Cz, -Sz,  0],
                   [ Sz,  Cz,  0],
                   [  0,   0,  1]], dtype=float)
    Rxyz = np.dot(Rz, np.dot(Ry, Rx))
    # (the unused a1/a2/a3 column extractions were removed)
    phi = np.degrees(np.arctan2(Rxyz[2, 0], Rxyz[1, 0]))
    beta = np.degrees(np.arctan2((-1 * Rxyz[0, 2]), Rxyz[0, 1]))
    theta = np.degrees(np.arctan2(np.sqrt(np.square(Rxyz[0, 1]) + np.square(Rxyz[0, 2])), Rxyz[0, 0]))
    return phi, beta, theta
# Deprecated function!
def percent_timer(num_jobs, wall_time, sub_path):
    """ Populates a list with the simulation job IDs and increments percentage bar based on their completion/walltime.
    Increments percentage bar based on jobs that have initiated a 'Running' state or are 'Done'.
    Current Status: Disabled due to threading not being a good solution.

    Parameters:
        num_jobs  -- total number of submitted jobs; each job's 'Running' or
                     'Done' transition advances the bar by 40/num_jobs percent.
        wall_time -- requested wall time; accepted but not referenced in the
                     current body.
        sub_path  -- directory containing the 'submit_log' file that is polled.
    """
    # Lists start with a sentinel 0 so they are never empty; IDs appended
    # later are regex-captured *strings*, so the int sentinel never collides.
    queue_id = [0]
    job_id = [0]
    first_running_id = [0]
    done_id=[0]
    # while_buffer stays 0 until at least one log entry has been parsed,
    # preventing the loop from exiting before any job appears in the log.
    while_buffer = 0
    # n counts observed state transitions (Running + Done) for the bar math.
    n = 0
    # current_sum is the progress-bar percentage handed to Rappture.
    current_sum = 20
    # Each regex captures the numeric job ID from a submit_log status line.
    queue_regex = re.compile(r'\((\d+)\) Simulation Queued at')
    job_regex = re.compile(r'\((\d+)\) Job Submitted at')
    running_regex = re.compile(r'\((\d+)\) Simulation Running at')
    done_regex = re.compile(r'\((\d+)\) Simulation Done at')
    # Re-scan the whole log each pass until every queued job is marked Done.
    while (sorted(done_id) != sorted(queue_id)) or (while_buffer == 0):
        # NOTE(review): 'rw' is not a valid open() mode on Python 3 and is
        # unreliable on Python 2; 'r' appears to be the intent here.
        with open(os.path.join(sub_path,'submit_log'), 'rw') as submit_stream:
            for line in submit_stream:
                queue_match = queue_regex.search(line)
                job_match = job_regex.search(line)
                running_match = running_regex.search(line)
                done_match = done_regex.search(line)
                if (queue_match) and (queue_match.group(1) not in queue_id):
                    queue_id.append(queue_match.group(1))
                    while_buffer = 1
                if (job_match) and (job_match.group(1) not in job_id):
                    job_id.append(job_match.group(1))
                    while_buffer = 1
                if (running_match) and (running_match.group(1) not in first_running_id):
                    n = n+1
                    first_running_id.append(running_match.group(1))
                    # NOTE(review): 40/num_jobs is integer division on
                    # Python 2, so progress quantizes to whole percents.
                    current_sum = 10 + n*(40/num_jobs)
                    Rappture.Utils.progress(*(int(current_sum), "Running DDSCAT..."))
                    # Treat every submitted job as queued from here on.
                    queue_id = list(set(queue_id)|set(job_id))
                if (done_match) and (done_match.group(1) not in done_id):
                    n = n+1
                    done_id.append(done_match.group(1))
                    current_sum = 10 + n*(40/num_jobs)
                    Rappture.Utils.progress(*(int(current_sum), "Running DDSCAT..."))
def progress():
    """Advance the Rappture progress bar to the next queued stage.

    Consumes the first entry of the module-level ``progress_stages`` list
    (a ``(percent, message)`` tuple) and forwards it to
    ``Rappture.Utils.progress``.  Does nothing once the list is empty.
    """
    if not progress_stages:
        return
    next_stage = progress_stages.pop(0)
    Rappture.Utils.progress(*next_stage)
# ==================== The main Program starts here ======================
if __name__ == "__main__":
progress_stages = [(0, "Initializing DDSCAT..."),
(10, "Running DDSCAT..."),
(90, "Loading output files..."),
]
progress()
# Open driver
driver_name = sys.argv[1]
driver = Rappture.library(driver_name)
if driver is None:
print "Error opening file " + driver_name
exit(-1)
driver_number = Rappture.tools.getDriverNumber(driver_name)
# Get the tool root directory
tool_path = sys.argv[2]
if tool_path == "":
tool_path = os.path.dirname(driver.get('tool.version.application.directory(tool)'))
# Pre-processing cleanup in case of an aborted simulation
for filename in ('custom_dielectric1','custom_dielectric2','custom_dielectric3','custom_dielectric4',
'custom_dielectric5','custom_dielectric6','custom_dielectric7','custom_dielectric8',
'custom_dielectric9', 'mtable','qtable', 'qtable2', 'qtable_data',
'Styles', 'field_datafile.txt',
'EField_VTK.vtk','BField_VTK.vtk','EField_VTK_logscale.vtk',
'EField_Vec','BField_Vec','Poynting_Vec',
'AuDiel.tab','AgDiel.tab','Pt_diel.tab','Pd_diel.tab','Cu_diel.txt','TiO2','SiO2',
'shape.dat', 'target.out', 'ddscat.par', 'composition.txt',
'w000r000k000.sca','w000r000.avg','w000r000k000.fml','zipfilename','zipfilename_VTK'):
if os.path.exists(filename):
os.remove(filename)
if os.path.exists('submit_results'):
shutil.rmtree('submit_results', ignore_errors = True)
if os.path.exists('submit_results_efield'):
shutil.rmtree('submit_results_efield', ignore_errors = True)
# Set the Failure flag to initially not failed.
ddscat_fail_flag = '0'
ddscat_fail_message = ''
parameter_groups = (
('input.phase(page1).group(options).group(hide_CSHAPE).choice({0})', ('CSHAPE',)),
('input.phase(page1).group(options).group(NCOMP_SET).integer({0})', ('NCOMP',)),
('input.phase(page1).group(options).group(NCOMP_SET1).integer({0})', ('NCOMP1',)),
('input.phase(page1).group(options).group(NCOMP_SET2).integer({0})', ('NCOMP2',)),
('input.phase(page1).group(options).group(NCOMP_SET3).integer({0})', ('NCOMP3',)),
('input.phase(page1).group(options).group(customSHPARs).number({0})', ('customDDIST',)),
('input.phase(page1).group(options).group(SHPARs).number({0})', ('SHPAR1','SHPAR2','SHPAR3','SHPAR4','SHPAR5','SHPAR6','DDIST')),
('input.phase(page3).group(advanced).choice({0})', ('CMDTRQ', 'CMDSOL', 'CALPHA', 'GAMMA')),
('input.phase(page3).group(advanced).number({0})', ('ETASCA', 'TOL')),
('input.phase(page3).group(advanced).integer({0})', ('NPLANES',)),
('input.phase(page3).group(NRFLD_HEAD).choice({0})', ('NRFLD',)),
('input.phase(page5).group(process).integer({0})', ('MXITER',)),
('input.phase(page3).group(NRFLD_increase).number({0})',
('NRFLD_r1', 'NRFLD_r2', 'NRFLD_r3', 'NRFLD_r4', 'NRFLD_r5', 'NRFLD_r6')),
('input.phase(page2).group(Wavelengths).number({0})', ('WAVINI', 'WAVEND')),
('input.phase(page2).group(Wavelengths).integer({0})', ('NWAV',)),
('input.phase(page2).group(Wavelengths).choice({0})', ('WCDIVID',)),
('input.phase(page2).group(Wavelengths).string({0})', ('WAV_table',)),
('input.phase(page1).group(options).group(Ambient).number({0})', ('NAMBIENT',)),
('input.phase(page1).boolean({0})', ('IORTH',)),
('input.phase(page1).group(options).group(Polarization).group(X).number({0})', ('X1', 'X2')),
('input.phase(page1).group(options).group(Polarization).group(Y).number({0})', ('Y1', 'Y2')),
('input.phase(page1).group(options).group(Polarization).group(Z).number({0})', ('Z1', 'Z2')),
('input.phase(page1).group(options).group(Rotations).group(Beta).number({0})', ('BETA',)),
('input.phase(page1).group(options).group(Rotations).group(Theta).number({0})', ('THET',)),
('input.phase(page1).group(options).group(Rotations).group(Phi).number({0})', ('PHI',)),
)
params = {}
for group, param_names in parameter_groups:
for param_name in param_names:
params[param_name] = driver.get(group.format(param_name)+'.current')
params['CMDFFT'] = 'GPFAFT' # Do not use the Intel MKL library
params['CBINFLAG'] = 'NOTBIN' # Do not write binary files
params['IWRKSC'] = '0' # Write a .sca file for each target orientation
period_type = driver.get('input.phase(page3).choice(PERIOD).current')
params['SHPAR7']=''
params['FRAME_TYPE'] = 'LFRAME'
params['NPLANE_TEXT']=' = NPLANES = number of scattering planes'
params['PERIODIC_SCATTERING_ORDERS'] = ''
brotX,brotY,brotZ = check_lightshuttle()
LightType = driver.get('input.phase(page1).group(options).group(Rotations).group(ILight).boolean(ILIGHT).current')
if LightType == 'no':
brotX,brotY,brotZ = 0,0,0
initialrotX = float(params['PHI'])
initialrotY = float(params['BETA'])
initialrotZ = float(params['THET'])
rotX = brotX + initialrotX
rotY = brotY + initialrotY
rotZ = brotZ + initialrotZ
rotPhi,rotBeta,rotTheta = rotate3D(rotX,rotY,rotZ)
params['PHI'] = '{0}'.format(rotPhi)
params['BETA'] = '{0}'.format(rotBeta)
params['THET'] = '{0}'.format(rotTheta)
cudiel = {}
# Perform data rewrite for Polarization types
# Technically this could be done in the tool.xml via an example uploader (future implementation).
PolType = driver.get('input.phase(page1).group(options).group(Polarization_set).choice(Polar_choice).current')
if PolType == '1':
params['Y1']= '1'
params['Y2']= '0'
params['Z1']= '0'
params['Z2']= '0'
elif PolType == '2':
params['Y1']= '0'
params['Y2']= '0'
params['Z1']= '1'
params['Z2']= '0'
elif PolType == '3':
params['Y1']= '1'
params['Y2']= '0'
params['Z1']= '0'
params['Z2']= '1'
elif PolType == '4':
params['Y1']= '1'
params['Y2']= '0'
params['Z1']= '0'
params['Z2']= '-1'
elif PolType == '5':
params['Y1']= '1'
params['Y2']= '0'
params['Z1']= '0'
params['Z2']= '0'
params['IORTH']= 'yes'
# Perform data rewrite for custom dielectrics being used.
for i in range(1,10):
check_diel = ''
check_diel = driver.get('input.phase(page1).group(options).group(dielectrics1to9).group(truediel{0}).loader(compload{0}).current'.format(i))
if check_diel == 'Uploaded data':
cudiel[i] = '1'
else:
cudiel[i] = driver.get('input.phase(page1).group(options).group(dielectrics1to9).group(truediel{0}).choice(CDIEL{0}).current'.format(i))
# Note that the current version of DDSCAT (7.3) forces the custom shape file to be named 'shape.dat'
# Thus, a copy of the shuttle file has to be made in order to preserve unique shuttles.
sessionnum = os.getcwd().split('/')[-1]
check_variable = driver.get('input.phase(page1).group(options).group(uploader).loader(loaded).current')
cshape_check = driver.get('input.phase(page1).group(options).group(hide_CSHAPE).choice(CSHAPE).current')
input_shape_filename = 'shape.dat'
# If the input file is an uploaded file:
# Place any custom shape files in the same directory as the parameter file
if (check_variable == 'Uploaded data'):
cshape_check = '8'
driver.put('input.phase(page1).group(options).group(hide_CSHAPE).choice(CSHAPE).current','8')
input_shape_filename = 'shape.dat'
input_shape_data = driver.get(driver.get('input.phase(page1).group(options).group(uploader).loader(loaded).upload.to') + '.current')
driver.put("input.phase(page1).group(options).group(uploader).string(UploadedFile).current","empty",append=0)
with open(input_shape_filename, 'w') as in_shape_file:
in_shape_file.write(input_shape_data)
in_shape_file = ""
input_shape_data = ""
with open(input_shape_filename,'r') as in_shape_file:
read_dip_counter = 0
dipole_NAT = 0
ddim_array_x = []
ddim_array_y = []
ddim_array_z = []
for line in in_shape_file:
if read_dip_counter == 1:
dip_array = line.split()
try:
ddim_array_x.append(int(float(dip_array[1])))
ddim_array_y.append(int(float(dip_array[2])))
ddim_array_z.append(int(float(dip_array[3])))
dipole_NAT = dipole_NAT + 1
except IndexError:
pass
except ValueError:
pass
if 'ICOMP(x,y,z)' in line.split():
read_dip_counter = 1
try:
ddip1 = (max(ddim_array_x) - min(ddim_array_x)) + 1
ddip2 = (max(ddim_array_y) - min(ddim_array_y)) + 1
ddip3 = (max(ddim_array_z) - min(ddim_array_z)) + 1
NAT2x, NAT2y, NAT2z = data_NF235_LIST(int(float(ddip1)),int(float(ddip2)),int(float(ddip3)))
except (ValueError,IndexError):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNo valid Input Object File was found.\n"
fileSize = os.path.getsize('shape.dat')
mcheck1, sizeMB1, freemem1 = memory_check_filesize(fileSize)
mcheck2, sizeMB2, freemem2 = memory_check(dipole_NAT)
sizeMB = sizeMB1 + sizeMB2
mcheck = mcheck1 + mcheck2
if (mcheck == 0):
ddscat_fail_flag = '1'
ddscat_fail_message += "\n\nThe simulation/conversion requested\n requires more disk space than the user has available.\n The disk space required is approximately {0}MB.\n The disk available is {1}MB.\n".format(sizeMB, freemem1)
# If the input file is a shuttle file:
if (cshape_check == '9') and (check_variable != 'Uploaded data'):
check_variable = 'Uploaded data'
for root, _, files in os.walk('/tmp/'):
for fil in files:
if fil.endswith(".elttuhs"+sessionnum) == True:
this_shuttle = os.path.join(root,fil)
# Check that the user has enough space to make the copy
fileSize = os.path.getsize(this_shuttle)
with open(this_shuttle,'r') as getThatNAT:
read_dip_counter = 0
dipole_NAT = 0
ddim_array_x = []
ddim_array_y = []
ddim_array_z = []
for line in getThatNAT:
if read_dip_counter == 1:
dip_array = line.split()
try:
ddim_array_x.append(int(float(dip_array[1])))
ddim_array_y.append(int(float(dip_array[2])))
ddim_array_z.append(int(float(dip_array[3])))
dipole_NAT = dipole_NAT + 1
except IndexError:
pass
except ValueError:
pass
if 'ICOMP(x,y,z)' in line.split():
read_dip_counter = 1
try:
ddip1 = (max(ddim_array_x) - min(ddim_array_x)) + 1
ddip2 = (max(ddim_array_y) - min(ddim_array_y)) + 1
ddip3 = (max(ddim_array_z) - min(ddim_array_z)) + 1
NAT2x, NAT2y, NAT2z = data_NF235_LIST(int(float(ddip1)),int(float(ddip2)),int(float(ddip3)))
except ValueError:
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNo valid Input Object File was found.\n"
break
mcheck1, sizeMB1, freemem1 = memory_check_filesize(fileSize)
mcheck2, sizeMB2, freemem2 = memory_check(dipole_NAT)
sizeMB = sizeMB1 + sizeMB2
mcheck = mcheck1 + mcheck2
if (mcheck == 0):
ddscat_fail_flag = '1'
ddscat_fail_message += "\n\nThe simulation/conversion requested\n requires more disk space than the user has available.\n The disk space required is approximately {0}MB.\n The disk available is {1}MB.\n".format(sizeMB, freemem1)
break
with open(this_shuttle,'r') as shuttle_file:
with open('shape.dat', 'w') as in_shape_file:
check_space = shuttle_file.readline()
if check_space != '':
input_data = check_space
input_data += shuttle_file.read()
in_shape_file.write(input_data)
# driver.put('input.phase(page1).group(options).group(uploader).string(UploadedFile).current', input_data)
# driver.put('input.phase(page1).group(options).group(uploader).loader(loaded).current', 'Uploaded data')
input_data = ""
shuttle_file = ""
in_shape_file = ""
if fil.endswith(".nmelttuhs"+sessionnum) == True:
this_shuttel = os.path.join(root,fil)
with open(this_shuttel,'r') as ts:
a = ts.readline()
b = ts.readline()
params['customDDIST'] = a.split()[-1]
driver.put('input.phase(page1).group(options).group(customSHPARs).number(customDDIST).current',params['customDDIST'])
# params['NCOMP'] = b.split()[-1]
if check_variable == 'Uploaded data':
params['CSHAPE'] = '8'
count_ncomp = int(params['NCOMP'])
for n in range(count_ncomp+1, 10):
check_ncomp = cudiel[n]
if check_ncomp != 'None':
cudiel[n] = '5'
for n in range(2,int(params['NCOMP'])+1):
check_ncomp = cudiel[n]
if check_ncomp == '5':
count_ncomp = count_ncomp - 1
params['NCOMP'] = ('{0}'.format(count_ncomp))
# Grab the NAT and x,y,z lengths from the shape file
if os.path.exists('shape.dat') and (os.path.getsize('shape.dat') != 0):
with open('shape.dat','r') as shapein:
shline = shapein.readline()
if shline == '\n':
shline = shapein.readline()
# Convert SHPAR values to dipoles from (nm)
DipolesPerNM = float(params['DDIST'])
params['SHPAR1'] = (float(params['SHPAR1'])*(DipolesPerNM))
params['SHPAR2'] = (float(params['SHPAR2'])*(DipolesPerNM))
if (params['CSHAPE'] != '4'):
params['SHPAR3'] = (float(params['SHPAR3'])*(DipolesPerNM))
else:
params['SHPAR3'] = (float(params['SHPAR3']))
params['SHPAR4'] = (float(params['SHPAR4'])*(DipolesPerNM))
params['SHPAR5'] = (float(params['SHPAR5'])*(DipolesPerNM))
params['SHPAR6'] = (float(params['SHPAR6'])*(DipolesPerNM))
# Set the plane values accordingly.
# Currently deprecated, default is set to always use 1 plane.
if params['NPLANES'] == '1':
params['PLANE1'] = '0. 0. 180. 1 = phi, thetan_min, thetan_max (deg) for plane A'
params['PLANE2'] = ''
if params ['NPLANES'] == '2':
params['PLANE1'] = '0. 0. 180. 5 = phi, thetan_min, thetan_max (deg) for plane A'
params['PLANE2'] = '90. 0. 180. 5 = phi, thetan_min, thetan_max (deg) for plane B'
# Custom dimensioning is applied based on valid sizings given in the NF235 list.
par1, par2, par3 = data_NF235_LIST(int(round(params['SHPAR1'])), int(round(params['SHPAR2'])), int(round(params['SHPAR3'])))
params['dipole_dim1']= int(par1)
params['dipole_dim2']= int(par2)
params['dipole_dim3']= int(par3)
# Adjustment for cylinder type memory requirements
if (params['CSHAPE'] == '4') or (params['CSHAPE'] == '5') or (params['CSHAPE'] == '6'):
par3 = par2
if (params['CSHAPE'] == '5'):
par1 = par1 + par2
par1,par2,par3 = data_NF235_LIST(int(par1),int(par2),int(par3))
params['dipole_dim1']= int(par1)
params['dipole_dim2']= int(par2)
params['dipole_dim3']= int(par3)
if (params['CSHAPE'] != '8') and (params['CSHAPE'] != '9'):
dipole_NAT = int(par1) * int(par2) * int(par3)
# Begin Shape Check routine to confirm correct parameters for respective Shape options
none_check1 = driver.get('input.phase(page1).group(options).group(dielectrics1to9).group(truediel2).choice(CDIEL2).current')
none_check2 = driver.get('input.phase(page1).group(options).group(dielectrics1to9).group(truediel3).choice(CDIEL3).current')
if params['CSHAPE'] == '1':
params['CSHAPE'] = 'ELLIPSOID'
params['NCOMP'] = ''
params['NCOMP2'] = ''
params['NCOMP3'] = ''
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0) or (params['SHPAR3'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
# if (period_type == '1') or (period_type =='2'):
# params['SHPAR4']="'shape.dat'"
# params['SHPAR5']=''
# params['SHPAR6']=''
elif params['CSHAPE'] == '2':
params['CSHAPE'] = 'ANIELLIPS'
params['NCOMP'] = ''
params['NCOMP1'] = ''
params['NCOMP2'] = ''
# if (period_type == '1') or (period_type =='2'):
# params['SHPAR4']="'shape.dat'"
# params['SHPAR5']=''
# params['SHPAR6']=''
if none_check1 == '5':
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNot all required dielectric materials have been allocated\n"
if none_check2 == '5':
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNot all required dielectric materials have been allocated\n"
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0) or (params['SHPAR3'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
elif params['CSHAPE'] == '3':
params['CSHAPE'] = 'CONELLIPS'
params['NCOMP'] = ''
params['NCOMP1'] = ''
params['NCOMP3'] = ''
# if (period_type == '1') or (period_type =='2'):
# params['SHPAR7']="'shape.dat'"
if none_check1 == '5':
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNot all required dielectric materials have been allocated\n"
if (int(params['SHPAR1']) < int(params['SHPAR4'])) \
or (int(params['SHPAR2']) < int(params['SHPAR5'])) \
or (int(params['SHPAR3']) < int(params['SHPAR6'])):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nThe first concentric ellipsoid specified\n must have larger or equal parameters (SHPAR 1-3)\n compared to the second ellipsoid (SHPAR 4-6).\n"
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0) or (params['SHPAR3'] == 0.0) or (params['SHPAR4'] == 0.0) or (params['SHPAR5'] == 0.0) or (params['SHPAR6'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
elif params['CSHAPE'] == '4':
params['CSHAPE'] = 'CYLINDER1'
params['NCOMP'] = ''
params['NCOMP2'] = ''
params['NCOMP3'] = ''
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
if ((params['SHPAR3'] != 1.0) and (params['SHPAR3'] != 2.0) and (params['SHPAR3'] != 3.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nThe third parameter for Cylinders must have a value of 1 or 2 or 3.\n"
# if (period_type == '1') or (period_type =='2'):
# params['SHPAR4']="'shape.dat'"
# params['SHPAR5']=''
# params['SHPAR6']=''
elif params['CSHAPE'] == '5':
params['CSHAPE'] = 'CYLNDRCAP'
params['NCOMP'] = ''
params['NCOMP2'] = ''
params['NCOMP3'] = ''
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
# if (period_type == '1') or (period_type =='2'):
# params['SHPAR3']="'shape.dat'"
# params['SHPAR4']=''
# params['SHPAR5']=''
# params['SHPAR6']=''
elif params['CSHAPE'] == '6':
params['CSHAPE'] = 'UNIAXICYL'
params['NCOMP'] = ''
params['NCOMP1'] = ''
params['NCOMP3'] = ''
# if (period_type == '1') or (period_type =='2'):
# params['SHPAR3']="'shape.dat'"
# params['SHPAR4']=''
# params['SHPAR5']=''
# params['SHPAR6']=''
if none_check1 == '5':
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNot all required dielectric materials have been allocated\n"
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
elif params['CSHAPE'] == '7':
params['CSHAPE'] = 'RCTGLPRSM'
params['NCOMP'] = ''
params['NCOMP2'] = ''
params['NCOMP3'] = ''
# if (period_type == '1') or (period_type =='2'):
# params['CSHAPE'] = 'RCTGL_PBC'
# params['SHPAR3']="'shape.dat'"
# params['SHPAR4']=''
# params['SHPAR5']=''
# params['SHPAR6']=''
if ((params['SHPAR1'] == 0.0) or (params['SHPAR2'] == 0.0) or (params['SHPAR3'] == 0.0)):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nA parameter was specified with a value of 0.\n"
elif params['CSHAPE'] == '8':
params['CSHAPE'] = 'FROM_FILE'
params['NCOMP1'] = ''
params['NCOMP2'] = ''
params['NCOMP3'] = ''
params['SHPAR1'] = ''
params['SHPAR2'] = ''
params['SHPAR3'] = ''
if (period_type == '1') or (period_type =='2'):
DipolesPerNM = 1/float(params['customDDIST'])
params['CSHAPE']='FRMFILPBC'
params['SHPAR1']= driver.get('input.phase(page3).group(PERIOD_SHPARs).number(PERIOD_SHPAR1).current')
params['SHPAR2']= driver.get('input.phase(page3).group(PERIOD_SHPARs).number(PERIOD_SHPAR2).current')
if driver.get('input.phase(page3).group(PERIOD_SHPARs).boolean(snap_PS1).current') == "yes":
try:
params['SHPAR1']= ddip2
except NameError:
pass
if driver.get('input.phase(page3).group(PERIOD_SHPARs).boolean(snap_PS2).current') == "yes":
try:
params['SHPAR2']= ddip3
except NameError:
pass
params['SHPAR1'] = (float(params['SHPAR1'])*(DipolesPerNM))
params['SHPAR2'] = (float(params['SHPAR2'])*(DipolesPerNM))
params['SHPAR3']="'shape.dat'"
params['SHPAR4']=''
params['SHPAR5']=''
params['SHPAR6']=''
if (period_type == '1'):
params['SHPAR2']='0'
# Reallocate the dipole memory dimensions using NF235 values
try:
ddip1 = NAT2x
ddip2 = NAT2y
ddip3 = NAT2z
par1,par2,par3 = data_NF235_LIST(int(float(ddip1)),int(float(ddip2)),int(float(ddip3)))
params['dipole_dim1'] = int(par1)
params['dipole_dim2'] = int(par2)
params['dipole_dim3'] = int(par3)
except (ValueError,IndexError,NameError):
ddscat_fail_flag = '1'
ddscat_fail_message += "\nNo valid Input Object File was found.\n"
# Set correct numeric value for IORTH
# IORTH is no longer always 1.
if params['IORTH'] == 'yes':
params['IORTH'] = '2'
else:
params['IORTH'] = '1'
# If the user inputs custom wavelengths, write them to a file
if params['WCDIVID'] == 'TAB':
try:
with open('wave.tab', 'w') as tabfile:
tabfile.write(params['WAV_table'])
except IOError, e:
log('ERROR: ' + e.strerror)
# Place any custom dielectric files in the same directory as the other dielectrics
material_dir = os.path.join(tool_path, 'data/diel')
    # Repeated for Custom Dielectrics 2-9, which reside together in a different tool section than Diel #1
indielname={}
indieldata={}
infile={}
for i in range(1,10):
indielname['input_diel_filename{0}'.format(i)] = 'custom_dielectric{0}'.format(i)
indieldata['input_diel_data{0}'.format(i)] = driver.get(driver.get('input.phase(page1).group(options).group(dielectrics1to9).group(truediel{0}).loader(compload{0}).upload.to'.format(i)) + '.current')
with open(indielname['input_diel_filename{0}'.format(i)],'w') as infile['in_diel_file{0}'.format(i)]:
infile['in_diel_file{0}'.format(i)].write(indieldata['input_diel_data{0}'.format(i)])
# Generate any constant custom dielectric files needed.
chek_en1 = ''
chek_enn = ''
diel_num_list = []
for en in range (1,10):
chek_enn = driver.get('input.phase(page1).group(options).group(dielectrics1to9).group(truediel{0}).loader(compload{0}).current'.format(en))
en_value = driver.get('input.phase(page1).group(options).group(mydielectrics1to9).group(minidiel{0}).number(customm_CDIEL{0}).current'.format(en))
if '{0}'.format(chek_enn)=="Input Constant Custom Dielectric":
diel_num_list.append((en, en_value))
gen_custom_diels(diel_num_list)
# Prepare the file paths relevant to current dielectric file choice(s), stored in prep_material/prepmat[]
prepmat={}
prepmatval={}
for i in range (1,10):
prepmat['prep_material{0}'.format(i)] = os.path.join(tool_path, 'data/diel', cudiel[i])
#Prepare Logic Values for identifying custom input dielectric files
# Logic values are: 1 or 10 = custom diel, 5 = no diel, name = library diel file
for i in range(1,10):
prepmatval['prep_material{0}_val'.format(i)] = cudiel[i]
# Modify material name if Custom Dielectric Material is selected
for i in range(1,10):
if ((prepmatval['prep_material{0}_val'.format(i)] == '1') or (prepmatval['prep_material{0}_val'.format(i)] == '10')):
prepmat['prep_material{0}'.format(i)] = os.path.realpath(indielname['input_diel_filename{0}'.format(i)])
    # Place dielectric file paths in the parameter file
for i in range(1,10):
if ((prepmatval['prep_material{0}_val'.format(i)] == '1') or (prepmatval['prep_material{0}_val'.format(i)] == '10')):
params['materials{0}'.format(i)] = """'{0}' = dielectric file {1}""".format(indielname['input_diel_filename{0}'.format(i)], i)
else:
params['materials{0}'.format(i)] = """'{0}' = dielectric file {1}""".format(prepmatval['prep_material{0}_val'.format(i)], i)
# diel_files = dielectric files to be passed for processing, initialized with the first diel file.
# diel_files must be sent the full path, .par file must be sent just the name.
# Default case handling:
# - Zeroed out input for when dielectrics are not in use.
# - Pass the 'dielectric_' renames if a custom dielectric is used.
# - Pass the default dielectric names if defaults are used.
diel_files = [prepmat['prep_material1']]
for i in range(2,10):
if prepmatval['prep_material{0}_val'.format(i)] == '5':
params['materials{0}'.format(i)] = ''
else:
diel_files.append(prepmat['prep_material{0}'.format(i)])
# Add any custom shapefiles to the list of files to send to processing.
check_variable = driver.get('input.phase(page1).group(options).group(uploader).loader(loaded).current')
cshape_check = driver.get('input.phase(page1).group(options).group(hide_CSHAPE).choice(CSHAPE).current')
if cshape_check == '9':
check_variable = 'Uploaded data'
if check_variable == 'Uploaded data':
diel_files.append(os.path.realpath(input_shape_filename))
progress()
# Perform last-stage value grabbing to determine which type of
# DDSCAT to run and how many cores, threads to use.
# ddscat_check is 0 for local, 1 for remote.
run_type = driver.get('input.phase(page5).group(process).choice(RUNSTATE).current')
num_cores = 1
wall_time = int(driver.get('input.phase(page5).group(process).integer(WALLTIME_M).current'))
collect_timing = "yes"
diel_files_submit = []
for diel_file in diel_files:
diel_files_submit.append("-i")
diel_files_submit.append(diel_file)
# If Nearfield is to be calculated, select the file type to send to our lite version of ddpostprocess
# Note: this is not configured for IORTH=2 at all.
# Thus, plots of .E2 and .EB2 are currently ignored.
nearfield_calculate = driver.get('input.phase(page3).group(NRFLD_HEAD).choice(NRFLD).current')
if (nearfield_calculate == '1'):
ddppfile_to_use = 'w000r000k000.E1'
if (nearfield_calculate == '2'):
ddppfile_to_use = 'w000r000k000.EB1'
if (nearfield_calculate == '1') or (nearfield_calculate == '2'):
# Zero out any extended E-field boundaries for TUCs that are touching.
# Note that for Periodicity, SHPAR1 holds the y-value. SHPAR2 holds the z-value.
params['NRFLD_r1'] = '0.5'
params['NRFLD_r2'] = '0.5'
params['NRFLD_r3'] = '0.5'
params['NRFLD_r4'] = '0.5'
params['NRFLD_r5'] = '0.5'
params['NRFLD_r6'] = '0.5'
if ((period_type == '1') or (period_type == '2')) and (params['SHPAR1'] != '') and (params['SHPAR2'] != ''):
if os.path.exists('shape.dat') and (os.path.getsize('shape.dat') != 0):
if ('{0}'.format(float(params['SHPAR1'])) == NAT2y):
params['NRFLD_r3'] = '0'
params['NRFLD_r4'] = '0'
if ('{0}'.format(float(params['SHPAR2'])) == NAT2z) and (period_type == '2'):
params['NRFLD_r5'] = '0'
params['NRFLD_r6'] = '0'
# If periodic conditions are used, build the Periodic Parameter Conditions
if period_type == '1':
params['FRAME_TYPE'] = 'TFRAME'
params['NPLANES'] = ''
params['PLANE1'] = ''
params['PLANE2'] = ''
params['NPLANE_TEXT']= ''
params['PERIODIC_SCATTERING_ORDERS']= '1 = number of scattering cones\n0. 0. 180. 0.05 = OrderM zetamin zetamax dzeta for scattering cone 1'
if period_type == '2':
params['FRAME_TYPE'] = 'TFRAME'
params['NPLANES'] = ''
params['PLANE1'] = ''
params['PLANE2'] = ''
params['NPLANE_TEXT']= ''
params['PERIODIC_SCATTERING_ORDERS']= '1 = number of scattering orders\n0. 0. = OrderM OrderN for scattered radiation'
# If a custom shapefile is used, space its dipoles properly.
if params['CSHAPE'] == 'FROM_FILE':
    DipolesPerNM = 1/float(params['customDDIST'])
#Prepare the aeff values from DDIST for DDSCAT use.
# NOTE(review): DipolesPerNM is only assigned above for FROM_FILE targets;
# for all other shapes it must already be defined earlier in the script —
# confirm, otherwise this line raises NameError.
distance = (1/(DipolesPerNM))
# convert back to microns
distance = (distance/1000)
# Error fallback: if an earlier step flagged a failure, dipole_NAT may be
# unset or meaningless; use a single dipole so the arithmetic below runs.
if ddscat_fail_flag == '1':
    dipole_NAT = 1
# Total target volume = (inter-dipole distance)^3 * number of dipoles.
volume = float(((distance**3)*(dipole_NAT)))
pie = math.pi
# Effective radius of the volume-equivalent sphere: aeff = (3V / 4*pi)^(1/3).
aeff = ((3*volume)/(4*pie))**(float(1)/3)
# Prepare the AEFFINI, AEFFEND, NRAD, RCDIVID values for the par file based on user input.
params['AEFFINI']='{0}'.format(aeff)
params['AEFFEND']='{0}'.format(aeff)
params['NRAD']='1'
params['RCDIVID']='LIN'
# Perform conversions for user-specified wavelength in (nm) to microns.
# Note: after this point params['WAVINI']/['WAVEND'] hold floats, not strings.
start_wav_temp = float(params['WAVINI'])
params['WAVINI'] = (start_wav_temp/float(1000))
end_wav_temp = float(params['WAVEND'])
params['WAVEND'] = (end_wav_temp/float(1000))
# If requested, run the command for submitting DDSCAT to a cluster,
# splitting multiple wavelengths into different jobs.
# Job specification is implicit to number of wavelength splits.
# Currently defaulted to 1 job per wavelength.
# The single job with the max light extinction is re-submitted for nearfield simulations.
# In the case of only a single wavelength, only a single nearfield simulation is run.
Rappture.Utils.progress(*(int(30), "Running DDSCAT..."))
# First, check the amount CPU/Memory requested is available:
if (run_type == 'remote_splitting') and (ddscat_fail_flag == '0'):
    Field_status = driver.get('input.phase(page3).group(NRFLD_HEAD).choice(NRFLD).current')
    # Predicted memory use and target venue for a run WITH nearfield output.
    memory_usage, venue_name = get_mem(dipole_NAT, Field_status)
    if (Field_status != '0'):
        # Nearfield runs also produce large files; verify disk space first.
        mcheck, sizeMB, freemem = memory_check(dipole_NAT)
        if (not mcheck):
            ddscat_fail_flag = '1'
            ddscat_fail_message += "\n\nThe simulation/conversion requested\n requires more disk space than the user has available.\n The disk space required is {0}MB.\n The disk available is {1}MB.\n".format(sizeMB, freemem)
    # Size the job at 4000 MB of RAM per core, minimum one core.
    cores_to_use = int(math.ceil(float(memory_usage)/float(4000)))
    if cores_to_use == 0:
        cores_to_use = 1
    if (cores_to_use <= 48):
        # Recompute memory/venue for the far-field (no-Efield) passes by
        # temporarily treating the field status as '0'.
        Field_status = '0'
        memory_usage_noe, venue_name_noe = get_mem(dipole_NAT, Field_status)
        normal_cores_to_use = int(math.ceil(float(memory_usage_noe)/float(4000)))
        if normal_cores_to_use == 0:
            normal_cores_to_use = 1
    if (cores_to_use > 48):
        # Over the 48-core (192 GB) ceiling: refuse the submission.
        cores_to_use = 0
        ddscat_fail_flag = '1'
        ddscat_fail_message += "\n The job requested was predicted to use {0}MB of RAM while the maximum allowed is 192000MB of RAM\n".format(memory_usage)
    # Note that the above error capture method still holds even though we're using tiers instead of core counts.
    # This is because the limit is still 192 GB, which was 48x4GB cores previously.
if (run_type == 'remote_splitting') and (ddscat_fail_flag == '0'):
single_length = 0
nearfield_set = driver.get('input.phase(page3).group(NRFLD_HEAD).choice(NRFLD).current')
start_wav = float(params['WAVINI'])
end_wav = float(params['WAVEND'])
if (start_wav == end_wav) and (nearfield_set != '0'):
maxWaveExt = float(start_wav)
single_length = 1
num_wavs = int(params['NWAV'])
# reset cores for current submit command method which requires that cores be input as 0 for all submissions.
# counting cores is deprecated, but may be useful in the future if DDSCAT gets better parallel functionality.
normal_cores_to_use = 0
cores_to_use = 0
assert start_wav <= end_wav
assert num_wavs >= 1
# Calculate specific wavelengths to use based on the selected
# interpolation method.
wavs = []
if params['WCDIVID'] == 'LIN':
if num_wavs != 1:
step = (end_wav - start_wav) / (num_wavs - 1)
elif num_wavs == 1:
step = 0
current_wav = start_wav
for _ in range(num_wavs):
wavs.append(current_wav)
current_wav += step
wavs[-1] = end_wav
# There should be at least one wavelength per job.
num_jobs = num_wavs
# Group wavelengths by job number.
wav_groups = [list() for _ in range(num_jobs)]
while len(wavs) > 0:
for job_num in range(num_jobs):
wav_groups[job_num].append(wavs.pop())
home_dir = os.getcwd()
params['WCDIVID'] = 'TAB'
params['NRFLD'] = '0'
with open('ddscat.par', 'w') as param_file:
param_file.write(param_file_template.format(**params))
# Output the .par file parameters to screen
with open('ddscat.par','r') as parfile:
for line in parfile:
sys.stdout.write(line)
# Create a temporary directory for each job number and write a
# wave.tab file.
wav_files = []
working_dirs = []
for job_num in range(num_jobs):
working_dir = tempfile.mkdtemp(dir=home_dir)
working_dirs.append(working_dir)
os.chdir(working_dir)
with open('wave.tab', 'w') as wav_table:
# DDSCAT expects the wave.tab file to start with a header line.
wav_table.write('\n')
for wav in wav_groups[job_num]:
wav_table.write(str(wav) + '\n')
wav_files.append(os.path.join(working_dir, 'wave.tab'))
os.chdir(home_dir)
command = ["submit"]
command.append("-M")
command.append("-v")
command.append(venue_name_noe)
command.append("-n")
command.append(str(normal_cores_to_use))
command.append("-e")
command.append("OMP_NUM_THREADS=1")
command.append("-e")
command.append("DDSCAT_DISABLE_TARGET_OUT=TRUE")
command.append("-w")
command.append(str(wall_time))
command.append("-p")
command.append("@@wav=%s" % (','.join(wav_files)))
command.append("-i")
command.append("@@wav")
command.append("-i")
command.append(os.path.join(home_dir,'ddscat.par'))
command += diel_files_submit
if (params['CSHAPE'] == '8') or (params['CSHAPE'] == '9') :
command.append("-i")
command.append(os.path.join(home_dir,'shape.dat'))
command.append("ddscat-7.3.0-intel-14_openmp")
# Run submit , percentage bar incrementer run in parallel with 'submit' command.
# Make a temporary directory to return the submit results to, save its path name.
# Initialize an empty log for saving the stdout from the submit command.
saved_home = os.getcwd()
if not os.path.exists('submit_results'):
os.makedirs('submit_results')
sub_path = os.path.realpath('submit_results')
os.chdir(sub_path)
with open(os.path.realpath('submit_log'), 'w') as submit_stream:
1
    # Deprecated timer threading:
    # submitThread = threading.Thread(target=percent_timer, args=(num_jobs, wall_time, sub_path))
    # submitThread.daemon=True
    # submitThread.start()
    if single_length == 0:
        # Run the far-field sweep remotely and capture its combined log.
        exit_status, stdout, stderr = RapptureExec(command, streamOutput=True)
        submit_log = stdout + stderr
        with open('submit_log','w') as slog:
            slog.write(submit_log)
        os.chdir(saved_home)
        stdout_log, stdout_list, capture_out = find_stderr(".*.stderr$",home_dir)
    else:
        # Single-wavelength nearfield case: the sweep is skipped, so fake a
        # successful result with sentinel strings.
        exit_status, stdout, stderr = '0','skip',''
        os.chdir(saved_home)
        stdout_log, stdout_list, capture_out = 'skip stdoutlog\n','','skip capout\n'
    # Failure if the exit status is nonzero, any .stderr content was found,
    # or no captured output exists at all.
    if ('{0}'.format(exit_status) != '0') or (stdout_list != '') or (capture_out==''):
        ddscat_fail_flag = '1'
        ddscat_fail_message += "\nThe standard remote submission returned unsuccessfully.\n"
        if venue_name == 'invalid_problem_size':
            ddscat_fail_message += "\nInvalid Problem Size - The simulation requested more than 192GB of memory.\n"
    # Collect job output from not-single-wavelength-Efield simulations.
    if (ddscat_fail_flag != '1') and (single_length != 1):
        collate_table('mtable', find_all('mtable', home_dir), slice(0, 10))
        # collate_table on qtable also reports the wavelength with the
        # maximum extinction, used below to pick the nearfield re-run.
        maxWaveExt = collate_table('qtable', find_all('qtable', home_dir), slice(10, 21))
        collate_table('qtable2', find_all('qtable2', home_dir), slice(10, 21))
        # Remove temporary directories.
        for working_dir in working_dirs:
            shutil.rmtree(working_dir, ignore_errors=True)
    # re-submit the single wavelength which the Nearfield should be calculated for
    if (nearfield_set != '0') and (ddscat_fail_flag == '0'):
        Rappture.Utils.progress(*(int(90), "Running DDSCAT for Nearfield Wavelength..."))
        # Rewrite the par file for a one-wavelength run at the wavelength of
        # maximum extinction, with nearfield output re-enabled.
        params['WAVINI'] = '{0}'.format(float(maxWaveExt))
        params['WAVEND'] = '{0}'.format(float(maxWaveExt))
        params['NWAV'] = '1'
        params['WCDIVID'] = 'LIN'
        params['NRFLD'] = '{0}'.format(nearfield_set)
        with open('ddscat.par', 'w') as param_file:
            param_file.write(param_file_template.format(**params))
        # Create the submit command.
        command = ["submit"]
        command.append("-M")
        command.append("-v")
        command.append(venue_name)
        command.append("-n")
        command.append(str(cores_to_use))
        command.append("-e")
        command.append("OMP_NUM_THREADS=1")
        command.append("-e")
        command.append("DDSCAT_DISABLE_TARGET_OUT=TRUE")
        command.append("-w")
        command.append(str(wall_time))
        command.append("-i")
        command.append(os.path.join(home_dir,'ddscat.par'))
        command += diel_files_submit
        if (params['CSHAPE'] == '8') or (params['CSHAPE'] == '9') :
            command.append("-i")
            command.append(os.path.join(home_dir,'shape.dat'))
        command.append("ddscat-7.3.0-intel-14_openmp")
        # Make a temporary directory to return the submit results to, save its path name.
        # Initialize an empty log for saving the stdout from the submit command.
        saved_home = os.getcwd()
        if not os.path.exists('submit_results_efield'):
            os.makedirs('submit_results_efield')
        sub_path = os.path.realpath('submit_results_efield')
        # working_path*: destination in the current dir; Efield_path*: where
        # the submit run drops the field files inside submit_results_efield.
        working_path = os.path.join(os.getcwd(), 'w000r000k000.E1')
        working_pathB = os.path.join(os.getcwd(), 'w000r000k000.EB1')
        os.chdir(sub_path)
        Efield_path = os.path.join(os.getcwd(), 'w000r000k000.E1')
        EBfield_path = os.path.join(os.getcwd(), 'w000r000k000.EB1')
        exit_status, stdout, stderr = RapptureExec(command, streamOutput=True)
        submit_log = stdout + stderr
        with open('submit_log','w') as slog:
            slog.write(submit_log)
        os.chdir(saved_home)
        stdout_log, stdout_list, capture_out = find_stderr(".*.stderr$",home_dir)
        if ('{0}'.format(exit_status) != '0') or (stdout_list != '') or (capture_out == ''):
            ddscat_fail_flag = '1'
            ddscat_fail_message += "\nThe Nearfield-generating remote submission returned unsuccessfully.\n"
            if venue_name == 'invalid_problem_size':
                ddscat_fail_message += "\nInvalid Problem Size - The simulation requested more than 192GB of memory.\n"
        # Move the produced field files back into the working directory for
        # the postprocessing stage.
        if os.path.exists(Efield_path):
            os.rename(Efield_path,working_path)
        if os.path.exists(EBfield_path):
            os.rename(EBfield_path,working_pathB)
    # Collect all .stdout and .stderr files and put them in the output log.
    stdout_log, stdout_list, capture_out = find_stderr(".*.stderr$",home_dir)
    log(capture_out)
    # Collect job output for single-wavelength-Efield simulations.
    if (ddscat_fail_flag != '1') and (single_length == 1):
        collate_table('mtable', find_all('mtable', home_dir), slice(0, 10))
        maxWaveExt = collate_table('qtable', find_all('qtable', home_dir), slice(10, 21))
        collate_table('qtable2', find_all('qtable2', home_dir), slice(10, 21))
# Collect timing output
if collect_timing == 'yes':
total_time = 0.0
wavelength_times = []
concat_stderr_home = []
for stderr_path in find_regex(r'.*\.stderr', home_dir):
with open(stderr_path, 'r') as stderr_file:
wavelength, time = get_timing_info(stderr_file.read())
stderr_read = stderr_file.read()
concat_stderr_home.append((wavelength, stderr_read))
wavelength_times.append((wavelength, time))
total_time += time
# Sort by wavelength.
concat_stderr_home.sort(key=lambda x: x[0])
wavelength_times.sort(key=lambda x: x[0])
catch_neartime = []
timing_info = 'Total time: {} sec.\n'.format(total_time)
for wavelength, time in wavelength_times:
if wavelength in catch_neartime:
timing_info += 'Wavelength (Nearfield) {}: {} sec.\n'.format(wavelength, time)
if wavelength not in catch_neartime:
catch_neartime.append(wavelength)
timing_info += 'Wavelength {}: {} sec.\n'.format(wavelength, time)
# Run command for regular DDSCAT.
elif (run_type == 'local') and (ddscat_fail_flag == '0'):
    Field_status = driver.get('input.phase(page3).group(NRFLD_HEAD).choice(NRFLD).current')
    memory_usage, venue_name = get_mem(dipole_NAT, Field_status)
    if (Field_status != '0'):
        # Nearfield output is large; verify local disk space first.
        mcheck, sizeMB, freemem = memory_check(dipole_NAT)
        if (not mcheck):
            ddscat_fail_flag = '1'
            ddscat_fail_message += "\n\nThe simulation/conversion requested\n requires more disk space than the user has available.\n The disk space required is {0}MB.\n The disk available is {1}MB.\n".format(sizeMB, freemem)
    # Local runs are capped at 16 GB of RAM.
    if (memory_usage > 16000):
        ddscat_fail_flag = '1'
        ddscat_fail_message += "\n\nThe simulation/conversion requested\n requires more memory than the user has available.\n The memory required is {0}MB.\n The memory available is 16000MB.\n".format(memory_usage)
    with open('ddscat.par', 'w') as param_file:
        param_file.write(param_file_template.format(**params))
    # Output the .par file parameters to screen
    with open('ddscat.par','r') as parfile:
        for line in parfile:
            sys.stdout.write(line)
            sys.stdout.flush()
    # Copy material files to the working directory.
    for diel_file in diel_files:
        if diel_file.split('/')[-1] in ('AuDiel.tab','AgDiel.tab','Pt_diel.tab','Pd_diel.tab','Cu_diel.txt','TiO2.tab','SiO2.tab'):
            shutil.copy(diel_file, '.')
    if (params['CSHAPE'] == '8') or (params['CSHAPE'] == '9'):
        runDDSCAT = ['ddscat','DDSCAT_DISABLE_TARGET_OUT=TRUE']
    else:
        runDDSCAT = ['ddscat']
    start_wav_local = float(params['WAVINI'])
    end_wav_local = float(params['WAVEND'])
    nearfield_local = float(params['NRFLD'])
    # Simple case: one wavelength, or no nearfield requested — one run only.
    if ((start_wav_local == end_wav_local) or (nearfield_local == 0)):
        exit_code, stdout, stderr = RapptureExec(runDDSCAT, streamOutput=True)
        #
    # Otherwise: run the far-field sweep first (NRFLD off), find the peak-
    # extinction wavelength, then re-run just that wavelength with nearfield
    # output. The save/restore renames preserve the sweep's tables.
    elif ((nearfield_local != 0) and (start_wav_local != end_wav_local)):
        params['NRFLD'] = '0'
        os.rename('ddscat.par','ddscat.par_save')
        with open('ddscat.par', 'w') as param_file:
            param_file.write(param_file_template.format(**params))
        exit_code, stdout, stderr1 = RapptureExec(runDDSCAT, streamOutput=True)
        # log(stderr)
        Efield_to_use, EBfield_to_use, maxWaveExt = local_maxqext_grab('qtable', find_all('qtable', os.getcwd()), slice(10,21), nearfield_calculate)
        os.rename('qtable','qtable_save')
        os.rename('qtable2','qtable2_save')
        os.rename('mtable','mtable_save')
        if nearfield_calculate == "1":
            params['NRFLD'] = '1'
        elif nearfield_calculate == "2":
            params['NRFLD'] = '2'
        params['WAVINI'] = '{0}'.format(maxWaveExt)
        params['WAVEND'] = '{0}'.format(maxWaveExt)
        params['NWAV'] = '1'
        with open('ddscat.par', 'w') as param_file:
            param_file.write(param_file_template.format(**params))
        exit_code, stdout, stderr = RapptureExec(runDDSCAT, streamOutput=True)
        # Merge the sweep's stderr with the nearfield run's stderr so the
        # timing parser below sees both runs.
        stderr = stderr1 + stderr
        os.rename('ddscat.par_save','ddscat.par')
        os.rename('qtable_save','qtable')
        os.rename('qtable2_save','qtable2')
        os.rename('mtable_save','mtable')
    # NOTE(review): any stdout text is treated as failure here — confirm
    # DDSCAT is expected to be silent on stdout for successful local runs.
    if ('{0}'.format(exit_code) != '0') or (stdout != ''):
        ddscat_fail_flag = '1'
        ddscat_fail_message += "\nDDSCAT failed to exit successfully.\n"
    stdout_log = stdout
    if collect_timing == 'yes':
        total_time = 0.0
        wavelength_times = get_timing_info_local(stderr)
        for wavelength, time in wavelength_times:
            total_time += time
        # First occurrence of a wavelength is the far-field run; a repeat is
        # reported as the nearfield re-run.
        catch_neartime = []
        timing_info = 'Total time: {} sec.\n'.format(total_time)
        for wavelength, time in wavelength_times:
            if wavelength in catch_neartime:
                timing_info += 'Wavelength (Nearfield) {}: {} sec.\n'.format(wavelength, time)
            if wavelength not in catch_neartime:
                catch_neartime.append(wavelength)
                timing_info += 'Wavelength {}: {} sec.\n'.format(wavelength, time)
    log(stdout)
    log(stderr)
home_dir = os.getcwd()
if ddscat_fail_flag != '1':
    # Determine which field files to use; fall back to the default names if
    # the expected files are absent.
    Efield_to_use, EBfield_to_use, maxWaveExt = local_maxqext_grab('qtable', find_all('qtable', home_dir), slice(10,21), nearfield_calculate)
    if not (os.path.exists('w000r000k000.E1')):
        Efield_to_use = 'w000r000k000.E1'
    if not (os.path.exists('w000r000k000.EB1')):
        EBfield_to_use = 'w000r000k000.EB1'
progress()
# Output an error message to the output logs if DDSCAT seems to have failed.
# Note that this output is placed here so that it is the first thing the user sees in the logs if it occurs.
if run_type != 'local':
    Efield_to_use = 'w000r000k000.E1'
    EBfield_to_use = 'w000r000k000.EB1'
    check_outlog = parse_output_log()
    if check_outlog != '0':
        exit_code = 1
        ddscat_fail_flag = '1'
        stdout_log = '{0}'.format(check_outlog)
        ddscat_fail_message += 'Not enough time or memory was allocated to complete the job.'
if ddscat_fail_flag == '1':
    # Quickly test some name allocations that don't get set if DDSCAT didn't run.
    # Each try/except gives the name a safe default when it was never bound.
    try:
        stdout_log += ''
    except NameError:
        stdout_log = ''
    try:
        checkEfieldname = Efield_to_use
    except NameError:
        Efield_to_use = 'filenotfound'
    try:
        checkEBfieldname = EBfield_to_use
    except NameError:
        EBfield_to_use = 'filenotfound'
    try:
        checktiming = timing_info
    except NameError:
        timing_info = 'No timing computed, DDSCAT failed.'
    # Build the user-facing failure report in the Rappture output tree.
    driver.put('output.string(failure).about.label','***SIMULATION STATUS***')
    driver.put('output.string(failure).current','<<< DDSCAT has encountered an error in processing. >>>\n\n Resultant data displayed for the settings attempted is void. \n If it was generated, please see the Output Log for more detailed information\n')
    driver.put('output.string(failure).current','\nReason: ', append=True)
    driver.put('output.string(failure).current',ddscat_fail_message, append=True)
    driver.put('output.string(failure).current','\n\nCrash Log:\n', append=True)
    driver.put('output.string(failure).current','{0}'.format(stdout_log), append=True)
    driver.put('output.string(failure).current', '\n# Nearfield Not Requested or Not Usable', append=True)
# Check that the file to be used for the E-field is not an empty file.
# NRFLD '0'/'1' use the E-field file; '2' uses the combined E+B file.
Efield_fail_flag = '0'
if nearfield_calculate in ("0", "1"):
    field_file_to_check = Efield_to_use
elif nearfield_calculate == "2":
    field_file_to_check = EBfield_to_use
else:
    field_file_to_check = None
if field_file_to_check is not None and os.path.exists(field_file_to_check):
    if os.path.getsize(field_file_to_check) <= 0:
        Efield_fail_flag = '1'
if Efield_fail_flag == '1':
    # An empty field file means the E-field request silently failed.
    efield_fail_message = "\nThe request for an Electric Field did not return from DDSCAT successfully."
    driver.put('output.string(failure).about.label','***SIMULATION STATUS***')
    driver.put('output.string(failure).current','<<< DDSCAT has encountered an error in processing. >>>\n\n Resultant data displayed for the settings attempted is void. \n If it was generated, please see the Output Log for more detailed information\n')
    driver.put('output.string(failure).current',efield_fail_message, append=True)
    driver.put('output.string(failure).current', '\n# Nearfield Not Requested or Not Usable', append=True)
# Write a successful output status if successful.
if (Efield_fail_flag != '1') and (ddscat_fail_flag != '1'):
    driver.put('output.string(failure).about.label','***SIMULATION STATUS***')
    driver.put('output.string(failure).current','<<< DDSCAT succeeded in processing! >>>\n If it was generated, please see the Output Log for more detailed information\n')
    driver.put('output.string(failure).current','\nWavelength Value Considered for Nearfield Calculations: {0}\n'.format(float(maxWaveExt)), append=True)
    if run_type != 'local':
        # Compute requested-vs-predicted memory deltas for the remote runs
        # (currently only used by the commented-out diagnostics below).
        memcore2 = int(normal_cores_to_use)*4000
        memdiff2 = float(memcore2 - memory_usage_noe)
        memcore1 = int(cores_to_use)*4000
        memdiff1 = float(memcore1 - memory_usage)
#        driver.put('output.string(failure).current','\nNumber of Cores Used: {0}\n'.format(normal_cores_to_use), append=True)
#        driver.put('output.string(failure).current','\nPredicted Non-Efield Memory Usage: {0} MB Needed, {1} MB Requested, {2} MB Surplus\n'.format(memory_usage_noe, memcore2, memdiff2), append=True)
#        driver.put('output.string(failure).current','\nNumber of Cores Used For Nearfield Calculation: {0}\n'.format(cores_to_use), append=True)
#        driver.put('output.string(failure).current','\nPredicted Nearfield Memory Usage: {0} MB Needed, {1} MB Requested, {2} MB Surplus\n'.format(memory_usage, memcore1, memdiff1), append=True)
# Prepare plots for output, guarantees first outputs in menu. If there is no error message.
# Label the four efficiency-factor curves (extinction, absorption, scattering,
# phase lag); the curve data itself is attached to these paths elsewhere.
#qtable
driver.put('output.curve(qtable).about.label','Light Extinction EF vs. Wavelength')
driver.put('output.curve(qtable).about.description','The plot corresponding to the extinction behavior denoted numerically in qtable.')
driver.put('output.curve(qtable).xaxis.label','Wavelength')
driver.put('output.curve(qtable).xaxis.units','uM')
driver.put('output.curve(qtable).yaxis.label','Light Extinction Efficiency Factor')
#qtable2
driver.put('output.curve(qtable2).about.label','Light Absorption EF vs. Wavelength')
driver.put('output.curve(qtable2).about.description','The plot corresponding to the absorption behavior denoted numerically in qtable.')
driver.put('output.curve(qtable2).xaxis.label','Wavelength')
driver.put('output.curve(qtable2).xaxis.units','uM')
driver.put('output.curve(qtable2).yaxis.label','Light Absorption Efficiency Factor')
#qtable3
driver.put('output.curve(qtable3).about.label','Light Scattering EF vs. Wavelength')
driver.put('output.curve(qtable3).about.description','The plot corresponding to the scattering behavior denoted numerically in qtable.')
driver.put('output.curve(qtable3).xaxis.label','Wavelength')
driver.put('output.curve(qtable3).xaxis.units','uM')
driver.put('output.curve(qtable3).yaxis.label','Light Scattering Efficiency Factor')
#qtable4
driver.put('output.curve(qtable4).about.label','Phase Lag EF vs. Wavelength')
driver.put('output.curve(qtable4).about.description','The plot corresponding to the behavior denoted numerically in qtable2.')
driver.put('output.curve(qtable4).xaxis.label','Wavelength')
driver.put('output.curve(qtable4).xaxis.units','uM')
driver.put('output.curve(qtable4).yaxis.label','Phase Lag Efficiency Factor')
# If the E-field is to be calculated, run our lite version of ddpostprocess and display the results.
no_display_flag = 0
if (os.path.exists('shape.dat')) and (not os.path.exists('target.out')):
    os.rename('shape.dat','target.out')
# Files larger than 200 MB are not rendered interactively (see elif branch
# further below, which offers a zipped download instead).
if (os.path.exists('target.out')):
    if (os.path.getsize('target.out') > 200000000):
        no_display_flag = 1
if ((nearfield_calculate == '1') or (nearfield_calculate == '2')) and (ddscat_fail_flag != '1') and (Efield_fail_flag != '1'):
    ddpp_path = os.path.join(tool_path, 'bin/myddpostprocess')
    # squareval selects |E| vs |E|^2 output from the postprocessor.
    squareval = driver.get('input.phase(page3).group(NRFLD_HEAD).group(NRFLD_LINE).choice(NRFLD_IVTR).current')
    ddStatus, ddStdout, ddStderr = RapptureExec([ddpp_path, ddppfile_to_use, squareval], streamOutput=False)
    if ddStatus != 0:
        # Postprocessor failure is fatal: log, report, and exit the tool.
        text = ddStdout + ddStderr
        log(text)
        sys.stderr.write(text)
        driver.result(ddStatus)
        sys.exit(ddStatus)
    RawDataFile = os.path.join(os.getcwd(),'field_datafile.txt')
    # Hidden developer options for fixing a slice plane in the VTK build.
    getSecret = driver.get('input.phase(page2).group(secretmenu).choice(secretdata).current')
    getSecretX = driver.get('input.phase(page2).group(secretmenu).number(fixX).current')
    getSecretY = driver.get('input.phase(page2).group(secretmenu).number(fixY).current')
    getSecretZ = driver.get('input.phase(page2).group(secretmenu).number(fixZ).current')
    # Build the VTK files and recover the grid bounds/extents for the meshes below.
    min_x,min_y,min_z,min_e,max_x,max_y,max_z,max_e,num_pts_x,num_pts_y,num_pts_z = BuildVTKfiles(RawDataFile, squareval, getSecret, getSecretX,getSecretY,getSecretZ)
    #Cleanup and printout for E-field processing
    remove_all_w(int(params['NWAV']))
    ### Visualization for E-Field
    # Copy the shared style definition next to the VTK outputs.
    style1path1 = os.path.join(tool_path, 'rappture/dda/Styles')
    style1path2 = os.path.join(os.getcwd(), 'Styles')
    shutil.copyfile(style1path1,style1path2)
    if os.path.exists('EField_VTK.vtk'):
        driver.put('output.field(2).about.label', 'Electric Field (3D Field)', '0')
        with open('EField_VTK.vtk', 'r') as VTKfile:
            driver.put('output.field(2).component.vtk', VTKfile.read(), '0', '0')
        with open(os.path.realpath('Styles'), 'r') as Stylefile:
            driver.put('output.field(2).component.style', Stylefile.read(),'0')
        # apply axis labeling (i.e. units)
        # Note that the units are actually (nm), but confusingly the axes always display an extra (10^-3) so
        # to counteract this I am just writing (um) and it looks like (um) x (10^-3)
        driver.put('output.field(2).about.xaxis.label', 'X (um)', '0')
        driver.put('output.field(2).about.yaxis.label', 'Y (um)', '0')
        driver.put('output.field(2).about.zaxis.label', 'Z (um)', '0')
    if os.path.exists('EField_VTK_logscale.vtk'):
        driver.put('output.field(4).about.label', 'Electric Field in Log Scale (3D Field)', '0')
        with open('EField_VTK_logscale.vtk', 'r') as VTKfile:
            driver.put('output.field(4).component.vtk', VTKfile.read(), '0', '0')
        with open(os.path.realpath('Styles'), 'r') as Stylefile:
            driver.put('output.field(4).component.style', Stylefile.read(),'0')
        driver.put('output.field(4).about.xaxis.label', 'X (nm)', '0')
        driver.put('output.field(4).about.yaxis.label', 'Y (nm)', '0')
        driver.put('output.field(4).about.zaxis.label', 'Z (nm)', '0')
    # NOTE(review): Field_status is only bound inside the remote/local run
    # branches above — confirm it is always defined when this block runs.
    if os.path.exists('BField_VTK.vtk') and (Field_status == "2"):
        driver.put('output.field(8).about.label', 'Magnetic Field (3D Field)', '0')
        with open('BField_VTK.vtk', 'r') as VTKfile:
            driver.put('output.field(8).component.vtk', VTKfile.read(), '0', '0')
        with open(os.path.realpath('Styles'), 'r') as Stylefile:
            driver.put('output.field(8).component.style', Stylefile.read(),'0')
        # apply axis labeling (i.e. units)
        # Note that the units are actually (nm), but confusingly the axes always display an extra (10^-3) so
        # to counteract this I am just writing (um) and it looks like (um) x (10^-3)
        driver.put('output.field(8).about.xaxis.label', 'X (um)', '0')
        driver.put('output.field(8).about.yaxis.label', 'Y (um)', '0')
        driver.put('output.field(8).about.zaxis.label', 'Z (um)', '0')
    # If Vector Field requested, draw it
    vecILINE = driver.get('input.phase(page3).group(NRFLD_HEAD).group(vectorinfo).choice(NRFLD_VECTOR).current')
    if (nearfield_calculate == '0'):
        # No nearfield data means there is nothing to render as vectors.
        vecILINE = "0"
    # vspacing = 0.001 * float(driver.get('input.phase(page3).group(NRFLD_HEAD).group(vectorinfo).number(vecspacing).current'))
    if (vecILINE == "1" or vecILINE == "2"):
        # if (vspacing != 0):
        #     num_pts_x = int(round(round((max_x - min_x)/float(vspacing),4)))
        #     num_pts_y = int(round(round((max_y - min_y)/float(vspacing),4)))
        #     num_pts_z = int(round(round((max_z - min_z)/float(vspacing),4)))
        # Prepare the vector mesh
        # Mesh: a hidden uniform grid over the bounds reported by BuildVTKfiles.
        driver.put('output.mesh(mymesh).about.label', 'Object Mesh')
        driver.put('output.mesh(mymesh).dim', '3')
        # driver.put('output.mesh(mymesh).units', 'um')
        driver.put('output.mesh(mymesh).hide', 'yes')
        driver.put('output.mesh(mymesh).grid.xaxis.min',min_x)
        driver.put('output.mesh(mymesh).grid.xaxis.max',max_x)
        driver.put('output.mesh(mymesh).grid.xaxis.numpoints',num_pts_x)
        driver.put('output.mesh(mymesh).grid.yaxis.min',min_y)
        driver.put('output.mesh(mymesh).grid.yaxis.max',max_y)
        driver.put('output.mesh(mymesh).grid.yaxis.numpoints',num_pts_y)
        driver.put('output.mesh(mymesh).grid.zaxis.min',min_z)
        driver.put('output.mesh(mymesh).grid.zaxis.max',max_z)
        driver.put('output.mesh(mymesh).grid.zaxis.numpoints',num_pts_z)
        # Note that the units are actually (nm), but confusingly the axes always display an extra (10^-3) so
        # to counteract this I am just writing (um) and it looks like (um) x (10^-3)
        driver.put('output.field(myfield4).about.label', 'E-Field Vector Rendering')
        driver.put('output.field(myfield4).about.xaxis.label', 'X (um)', '0')
        driver.put('output.field(myfield4).about.yaxis.label', 'Y (um)', '0')
        driver.put('output.field(myfield4).about.zaxis.label', 'Z (um)', '0')
        driver.put('output.field(myfield4).about.view', 'glyphs')
        driver.put('output.field(myfield4).component.mesh', 'output.mesh(mymesh)')
        driver.put('output.field(myfield4).component.elemtype', 'vectors')
        driver.put('output.field(myfield4).component.elemsize', '3')
        # NOTE(review): EField_Vec is opened unconditionally here — confirm
        # BuildVTKfiles always produces it when vectors are requested.
        with open('EField_Vec', 'r') as VECfile:
            driver.put('output.field(myfield4).component.values', VECfile.read(),append=True)
        if os.path.exists('BField_Vec') and (Field_status == "2"):
            # Prepare the vector mesh
            # Mesh (B-field copy of the grid above).
            driver.put('output.mesh(mymeshB).about.label', 'Object Mesh')
            driver.put('output.mesh(mymeshB).dim', '3')
            # driver.put('output.mesh(mymesh).units', 'um')
            driver.put('output.mesh(mymeshB).hide', 'yes')
            driver.put('output.mesh(mymeshB).grid.xaxis.min',min_x)
            driver.put('output.mesh(mymeshB).grid.xaxis.max',max_x)
            driver.put('output.mesh(mymeshB).grid.xaxis.numpoints',num_pts_x)
            driver.put('output.mesh(mymeshB).grid.yaxis.min',min_y)
            driver.put('output.mesh(mymeshB).grid.yaxis.max',max_y)
            driver.put('output.mesh(mymeshB).grid.yaxis.numpoints',num_pts_y)
            driver.put('output.mesh(mymeshB).grid.zaxis.min',min_z)
            driver.put('output.mesh(mymeshB).grid.zaxis.max',max_z)
            driver.put('output.mesh(mymeshB).grid.zaxis.numpoints',num_pts_z)
            # Note that the units are actually (nm), but confusingly the axes always display an extra (10^-3) so
            # to counteract this I am just writing (um) and it looks like (um) x (10^-3)
            driver.put('output.field(myfield4B).about.label', 'B-Field Vector Rendering')
            driver.put('output.field(myfield4B).about.xaxis.label', 'X (um)', '0')
            driver.put('output.field(myfield4B).about.yaxis.label', 'Y (um)', '0')
            driver.put('output.field(myfield4B).about.zaxis.label', 'Z (um)', '0')
            driver.put('output.field(myfield4B).about.view', 'glyphs')
            driver.put('output.field(myfield4B).component.mesh', 'output.mesh(mymeshB)')
            driver.put('output.field(myfield4B).component.elemtype', 'vectors')
            driver.put('output.field(myfield4B).component.elemsize', '3')
            with open('BField_Vec', 'r') as VECfile:
                driver.put('output.field(myfield4B).component.values', VECfile.read(),append=True)
            # Prepare the Poynting vector mesh
            # Mesh (same grid again for the Poynting rendering).
            driver.put('output.mesh(mymeshBC).about.label', 'Object Mesh')
            driver.put('output.mesh(mymeshBC).dim', '3')
            # driver.put('output.mesh(mymesh).units', 'um')
            driver.put('output.mesh(mymeshBC).hide', 'yes')
            driver.put('output.mesh(mymeshBC).grid.xaxis.min',min_x)
            driver.put('output.mesh(mymeshBC).grid.xaxis.max',max_x)
            driver.put('output.mesh(mymeshBC).grid.xaxis.numpoints',num_pts_x)
            driver.put('output.mesh(mymeshBC).grid.yaxis.min',min_y)
            driver.put('output.mesh(mymeshBC).grid.yaxis.max',max_y)
            driver.put('output.mesh(mymeshBC).grid.yaxis.numpoints',num_pts_y)
            driver.put('output.mesh(mymeshBC).grid.zaxis.min',min_z)
            driver.put('output.mesh(mymeshBC).grid.zaxis.max',max_z)
            driver.put('output.mesh(mymeshBC).grid.zaxis.numpoints',num_pts_z)
            # Note that the units are actually (nm), but confusingly the axes always display an extra (10^-3) so
            # to counteract this I am just writing (um) and it looks like (um) x (10^-3)
            driver.put('output.field(myfield4BC).about.label', 'Poynting Vector Rendering')
            driver.put('output.field(myfield4BC).about.xaxis.label', 'X (um)', '0')
            driver.put('output.field(myfield4BC).about.yaxis.label', 'Y (um)', '0')
            driver.put('output.field(myfield4BC).about.zaxis.label', 'Z (um)', '0')
            driver.put('output.field(myfield4BC).about.view', 'glyphs')
            driver.put('output.field(myfield4BC).component.mesh', 'output.mesh(mymeshBC)')
            driver.put('output.field(myfield4BC).component.elemtype', 'vectors')
            driver.put('output.field(myfield4BC).component.elemsize', '3')
            with open('Poynting_Vec', 'r') as VECfile:
                driver.put('output.field(myfield4BC).component.values', VECfile.read(),append=True)
    # Remove other temp files here too
    # os.remove('EField_VTK')
# Oversized-VTK fallback: instead of rendering, offer a zipped download.
elif os.path.exists('EField_VTK.vtk') and (no_display_flag == 1):
    try:
        # zlib availability determines whether the archive can be compressed.
        import zlib
        mode = zipfile.ZIP_DEFLATED
    except:
        mode = zipfile.ZIP_STORED
    try:
        zip = zipfile.ZipFile('zipfilename_VTK','w',mode)
        zip.write('EField_VTK.vtk')
        zip.close()
        driver.put('output.string(EField_VTK).current','zipfilename_VTK',type='file',compress=True)
    except NameError:
        pass
    driver.put('output.string(failure).current','\nThe file "EField_VTK" was too large to print properly,\n', append=True)
    driver.put('output.string(failure).current','however a zipped binary can still be downloaded via the download button.\n', append=True)
    driver.put('output.string(failure).current','After downloading, the extension must be changed from .dat to .zip and unzipped.', append=True)
    os.remove('EField_VTK.vtk')
# Publish the collected timing summary to the Rappture output tree.
if collect_timing == 'yes':
    prefix = 'output.string(timing)'
    driver.put('{0}.about.label'.format(prefix), 'Timing Information')
    driver.put('{0}.current'.format(prefix), timing_info)
# Avoid writing files that crash Rappture:
# target.out beyond 200 MB is zipped and offered as a download instead of
# being embedded in the output XML.
if (os.path.exists('target.out')):
    if (os.path.getsize('target.out') > 200000000):
        try:
            import zlib
            mode = zipfile.ZIP_DEFLATED
        except:
            mode = zipfile.ZIP_STORED
        try:
            zip = zipfile.ZipFile('zipfilename','w',mode)
            zip.write('target.out')
            zip.close()
            driver.put('output.string(target.out).about.label','target.out'+" (DDSCAT)")
            driver.put('output.string(target.out).current','zipfilename',type='file',compress=True)
        except NameError:
            pass
        # Remove the oversized original so the loop below skips it.
        os.remove('target.out')
        driver.put('output.string(failure).current','\nThe file "target.out" was too large to print properly (200MB+),\n', append=True)
        driver.put('output.string(failure).current','however a zipped binary can still be downloaded via the download button.\n', append=True)
        driver.put('output.string(failure).current','After downloading, the extension must be changed from .dat to .zip and unzipped.', append=True)
# Embed each raw DDSCAT output file (if present) as a downloadable string.
for filename in ('mtable', 'qtable', 'qtable2', 'target.out','ddscat.par'):
    prefix = 'output.string(%s)' % (filename,)
    driver.put(prefix + '.about.label', filename + " (DDSCAT)")
    if os.path.exists(filename):
        with open(filename, 'r') as output_file:
            driver.put(prefix + '.current', output_file.read())
        output_file = ""
# Read the strings from the qtable
qtable_plot_data=[0,'.']
qtable2_plot_data=[0,'.']
read_buffer=0
# If qtable doesn't actually exist, the plots below will crash.
# The easiest way to avoid this is to make a fake qtable in this case.
if not (os.path.exists('qtable')):
with open('qtable','w') as fake_table:
fake_table.write('')
if not (os.path.exists('qtable2')):
with open('qtable2','w') as fake_table:
fake_table.write('')
with open('qtable','r') as plot_input_file:
for line in plot_input_file:
a = line
if (re.search('wave Q_ext',a)):
read_buffer=1
if (read_buffer==1):
qtable_plot_data.append(a)
with open('qtable2','r') as plot_input_file:
read_buffer = 0
for line in plot_input_file:
a = line
if (read_buffer==1):
qtable2_plot_data.append(a)
if (re.search('wave Q_pha',a)):
read_buffer=1
xy4 = ['0 0']
for item in qtable2_plot_data:
try:
qpha = item.split()[2]
wavepha = item.split()[1]
if ((not math.isnan(float(qpha))) and (not math.isnan(float(wavepha)))):
xy4.append(('\n{0} {1}').format(wavepha, qpha))
except (IndexError, AttributeError):
pass
# Write the data points from the Qtable to file
temp_counter = 0
item_number = 0
with open('qtable_data', 'w') as data_file:
for item in qtable_plot_data:
item_number = item_number + 1
if temp_counter == 1:
data_file.write(item)
temp_counter = 1
# Prepare the 3 sets of xy values to send to interface
# Want line corrosponding to item_number = 3, continuing until index end
xy1 = ['0 0']
xy2 = ['0 0']
xy3 = ['0 0']
reader_fail_flag = '0'
element_counter = 0
for element in qtable_plot_data:
if (element_counter >=3):
temp_counter2 = 0
for word in qtable_plot_data[element_counter].split():
if (len(word) <= 11):
if (temp_counter2 == 1):
xval = word
if (temp_counter2 == 2):
y1val = word
if (temp_counter2 == 3):
y2val = word
if (temp_counter2 == 4):
y3val = word
temp_counter2 = temp_counter2 + 1
if (len(word) > 11):
reader_fail_flag = '1'
xval = 0
y1val = 0
y2val = 0
y3val = 0
if ((not math.isnan(float(xval))) and (not math.isnan(float(y1val)))):
xy1.append(('\n{0} {1}').format(xval, y1val))
else:
reader_fail_flag = '1'
if ((not math.isnan(float(xval))) and (not math.isnan(float(y2val)))):
xy2.append(('\n{0} {1}').format(xval, y2val))
else:
reader_fail_flag = '1'
if ((not math.isnan(float(xval))) and (not math.isnan(float(y3val)))):
xy3.append(('\n{0} {1}').format(xval, y3val))
else:
reader_fail_flag = '1'
element_counter = element_counter + 1
# Plot the three desired wavelength crossections.
read_buffer_xy1 = 0
read_buffer_xy2 = 0
read_buffer_xy3 = 0
read_buffer_xy4 = 0
for pair in xy1:
if read_buffer_xy1 == 1:
driver.put('output.curve(qtable).component.xy', pair, append=True)
read_buffer_xy1 = 1
for pair in xy2:
if read_buffer_xy2 == 1:
driver.put('output.curve(qtable2).component.xy', pair, append=True)
read_buffer_xy2 = 1
for pair in xy3:
if read_buffer_xy3 == 1:
driver.put('output.curve(qtable3).component.xy', pair, append=True)
read_buffer_xy3 = 1
for pair in xy4:
if read_buffer_xy4 == 1:
driver.put('output.curve(qtable4).component.xy', pair, append=True)
read_buffer_xy4 = 1
if reader_fail_flag == '1' and ddscat_fail_message == '':
driver.put('output.string(failure).about.label','***SIMULATION STATUS***')
driver.put('output.string(failure).current','<<< DDSCAT has encountered an error in processing. >>>\n\n Resultant data displayed for the settings attempted is void. \n')
driver.put('output.string(failure).current','\nCrash Log:\n', append=True)
driver.put('output.string(failure).current',' Inappropriate Values were returned for Light Absorption, Scattering, and/or Extinction', append=True)
driver.put('output.string(failure).current','\n See mtable, qtable, qtable2 for more information on the Inappropriate Values', append=True)
driver.put('output.string(failure).current', '\n# Nearfield Not Requested or Not Usable', append=True)
reader_fail_flag = '2'
if os.path.exists('submit_results/submit_log'):
driver.put('output.string(sublog).about.label', 'Remote Submission Log')
with open('submit_results/submit_log','r') as sublogfile:
driver.put('output.string(sublog).current', sublogfile.read())
if os.path.exists('submit_results_efield/submit_log'):
with open('submit_results_efield/submit_log','r') as sublogfile:
driver.put('output.string(sublog).current', '\n\n Nearfield: \n', append=True)
driver.put('output.string(sublog).current', sublogfile.read(), append=True)
email = driver.get('input.phase(page5).group(process).boolean(email).current')
if email == "yes":
command = ["submit"]
command.append("--progress")
command.append("silent")
command.append("mail2self")
command.append("-t")
command.append("Please check Nanohub.org for your simulation results")
command.append("-s")
if ddscat_fail_flag == "1" or Efield_fail_flag == "1" or reader_fail_flag == "2":
command.append("nanoDDSCAT+ Simulation #{0} Failed".format(driver_number))
else:
command.append("nanoDDSCAT+ Simulation #{0} Completed".format(driver_number))
# Send out an email about the remote submission
exit_status, stdout, stderr = RapptureExec(command, streamOutput=False)
driver.result(0)
# Remove created files from the working directory as post-processing cleanup.
remove_all_w(int(params['NWAV']))
for filename in ('custom_dielectric1','custom_dielectric2','custom_dielectric3','custom_dielectric4',
'custom_dielectric5','custom_dielectric6','custom_dielectric7','custom_dielectric8',
'custom_dielectric9', 'mtable','qtable', 'qtable2', 'qtable_data',
'Styles', 'field_datafile.txt',
'EField_VTK.vtk','BField_VTK.vtk','EField_VTK_logscale.vtk',
'EField_Vec','BField_Vec','Poynting_Vec',
'AuDiel.tab','AgDiel.tab','TiO2','SiO2','Pt_diel.tab','Pd_diel.tab','Cu_diel.txt',
'shape.dat', 'ddscat.par','target.out','ddpostprocess.par','zipfilename',
'zipfilename_VTK', 'composition.txt'):
if os.path.exists(filename):
# 1
os.remove(filename)
if os.path.exists('submit_results'):
shutil.rmtree('submit_results', ignore_errors = True)
if os.path.exists('submit_results_efield'):
shutil.rmtree('submit_results_efield', ignore_errors = True)
|
NanoBioNode/nanoDDSCATplus
|
rappture/dda/ddscat.py
|
Python
|
gpl-3.0
| 105,014
|
[
"VTK"
] |
54a6d0ff786441701ec7ec1d338ee63d5892796b9cc05f365c1a64d77b4940c2
|
from edc_constants.constants import SCHEDULED, UNSCHEDULED, NO, YES, OTHER
from lab_requisition.forms import RequisitionFormMixin
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminRadioSelect, AdminRadioFieldRenderer
from td_maternal.models import MaternalVisit
from tshilo_dikotla.choices import STUDY_SITES
from ..models import MaternalRequisition
class MaternalRequisitionForm(RequisitionFormMixin):
    """Form for a maternal lab requisition.

    Validates that the 'is_drawn' answer is consistent with the drawn
    date/time and the reason-not-drawn fields, that the specimen draw date
    is not before the requisition date, that the collection item type
    matches the panel (swab panels -> 'swab', all others -> 'tube'), and
    that the reason-not-drawn agrees with the visit report.
    """

    study_site = forms.ChoiceField(
        label='Study site',
        choices=STUDY_SITES,
        initial=settings.DEFAULT_STUDY_SITE,
        help_text="",
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    def __init__(self, *args, **kwargs):
        super(MaternalRequisitionForm, self).__init__(*args, **kwargs)
        # Most panels are tube collections; swab panels are enforced in clean().
        self.fields['item_type'].initial = 'tube'

    def clean(self):
        cleaned_data = super(MaternalRequisitionForm, self).clean()
        self.validate_drawing_requisitions(cleaned_data)
        self.validate_requisition_and_drawn_datetime()
        return cleaned_data

    def validate_drawing_requisitions(self, cleaned_data):
        """Cross-validate 'is_drawn' against drawn_datetime and the
        reason-not-drawn fields.

        Raises:
            forms.ValidationError: on any inconsistent combination.
        """
        # Fixed: previously this method ignored its argument and re-read
        # self.cleaned_data; it now uses the cleaned_data passed by clean().
        if cleaned_data.get('is_drawn') == YES and not cleaned_data.get('drawn_datetime'):
            raise forms.ValidationError(
                "A specimen was collected. Please provide the date and time collected.")
        if cleaned_data.get('is_drawn') == NO and cleaned_data.get('drawn_datetime'):
            raise forms.ValidationError(
                "A specimen was not collected, date and time collected NA.")
        if cleaned_data.get('is_drawn') == NO and not cleaned_data.get('reason_not_drawn'):
            raise forms.ValidationError(
                "Please provide a reason why the specimen was not collected.")
        if cleaned_data.get('is_drawn') == YES and cleaned_data.get('reason_not_drawn'):
            # Fixed message: in this branch the specimen WAS drawn, so a
            # reason-not-drawn must not be provided.
            raise forms.ValidationError(
                "A specimen was drawn. Do not provide a reason why it was not collected.")
        if cleaned_data.get('is_drawn') == YES and cleaned_data.get('reason_not_drawn_other'):
            raise forms.ValidationError(
                "A specimen was drawn. Do not provide a reason why it was not collected.")
        if (cleaned_data.get('reason_not_drawn') == 'other' and
                not cleaned_data.get('reason_not_drawn_other')):
            raise forms.ValidationError(
                "Please specify Other reason why requisition was not drawn.")

    def validate_requisition_and_drawn_datetime(self):
        """Validate the drawn/requisition dates, the panel vs item type,
        and the reason-not-drawn against the visit report.

        Raises:
            forms.ValidationError: on any inconsistency.
        """
        cleaned_data = self.cleaned_data
        if cleaned_data.get('drawn_datetime'):
            if cleaned_data.get('drawn_datetime').date() < cleaned_data.get('requisition_datetime').date():
                # Fixed format string: missing space after "as".
                raise forms.ValidationError(
                    'Requisition date cannot be in future of specimen date. Specimen draw date is '
                    'indicated as {}, whilst requisition is indicated as {}. Please correct'.format(
                        cleaned_data.get('drawn_datetime').date(),
                        cleaned_data.get('requisition_datetime').date()))
        # Swab panels must be collected as swabs; every other panel as a tube.
        if cleaned_data.get('panel').name in (
                'Vaginal swab (Storage)',
                'Rectal swab (Storage)',
                'Skin Swab (Storage)',
                'Vaginal STI Swab (Storage)'):
            if cleaned_data.get('item_type') != 'swab':
                raise forms.ValidationError(
                    'Panel is a swab therefore collection type is swab. Please correct.')
        else:
            if cleaned_data.get('item_type') != 'tube':
                # Fixed message: non-swab panels require a tube, not a swab.
                raise forms.ValidationError(
                    'Panel {} can only be tube therefore collection type is tube. '
                    'Please correct.'.format(cleaned_data.get('panel').name))
        maternal_visit = MaternalVisit.objects.get(
            appointment__registered_subject=cleaned_data.get(
                'maternal_visit').appointment.registered_subject,
            appointment=cleaned_data.get('maternal_visit').appointment,
            appointment__visit_instance=cleaned_data.get('maternal_visit').appointment.visit_instance)
        if maternal_visit:
            # A scheduled/unscheduled visit implies the participant attended,
            # so 'absent' is not an acceptable reason-not-drawn.
            if ((maternal_visit.reason == SCHEDULED or maternal_visit.reason == UNSCHEDULED) and
                    cleaned_data.get('reason_not_drawn') == 'absent'):
                raise forms.ValidationError(
                    'Reason not drawn cannot be {}. Visit report reason is {}'.format(
                        cleaned_data.get('reason_not_drawn'),
                        maternal_visit.reason))
        return cleaned_data

    class Meta:
        model = MaternalRequisition
        fields = '__all__'
|
botswana-harvard/tshilo-dikotla
|
td_lab/forms/maternal_requisition_form.py
|
Python
|
gpl-2.0
| 4,788
|
[
"VisIt"
] |
deff75bd073ed788f139f772e73f6f7d4cfe4bdf74ecd2acdd025b88a8e170e0
|
r"""
Vibro-acoustic problem
3D acoustic domain with 2D perforated deforming interface.
*Master problem*: defined in 3D acoustic domain (``vibro_acoustic3d.py``)
*Slave subproblem*: 2D perforated interface (``vibro_acoustic3d_mid.py``)
Master 3D problem - find :math:`p` (acoustic pressure)
and :math:`g` (transversal acoustic velocity) such that:
.. math::
c^2 \int_{\Omega} \nabla q \cdot \nabla p
- \omega^2 \int_{\Omega} q p
+ i \omega c \int_{\Gamma_{in}} q p
+ i \omega c \int_{\Gamma_{out}} q p
- i \omega c^2 \int_{\Gamma_0} (q^+ - q^-) g
= 2i \omega c \int_{\Gamma_{in}} q \bar{p}
\;, \quad \forall q \;,
- i \omega \int_{\Gamma_0} f (p^+ - p^-)
- \omega^2 \int_{\Gamma_0} F f g
+ \omega^2 \int_{\Gamma_0} C f w
= 0
\;, \quad \forall f \;,
Slave 2D subproblem - find :math:`w` (plate deflection)
and :math:`\ul{\theta}` (rotation) such that:
.. math::
\omega^2 \int_{\Gamma_0} C z g
- \omega^2 \int_{\Gamma_0} S z w
+ \int_{\Gamma_0} \nabla z \cdot \ull{G} \cdot \nabla w
- \int_{\Gamma_0} \ul{\theta} \cdot \ull{G} \cdot \nabla z
= 0
\;, \quad \forall z \;,
- \omega^2 \int_{\Gamma_0} R\, \ul{\nu} \cdot \ul{\theta}
+ \int_{\Gamma_0} D_{ijkl} e_{ij}(\ul{\nu}) e_{kl}(\ul{\theta})
- \int_{\Gamma_0} \ul{\nu} \cdot \ull{G} \cdot \nabla w
+ \int_{\Gamma_0} \ul{\nu} \cdot \ull{G} \cdot \ul{\theta}
= 0
\;, \quad \forall \ul{\nu} \;,
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.mechanics.matcoefs import stiffness_from_lame
# Input mesh: the 2D mid-plane (perforated interface) geometry.
filename_mesh = '../../meshes/2d/acoustic_wg_mid.vtk'

# Physical/model parameters.
sound_speed = 343.0  # speed of sound
wave_num = 5.5       # wave number
thickness = 0.01     # plate thickness

# Derived angular-frequency quantities reused when assembling the weak
# forms below (w = wave number * sound speed).
c = sound_speed
c2 = c * c
w = wave_num * c
w2 = w * w
wc = w * c
wc2 = w * c2
regions = {
    # The whole 2D interface mesh.
    'Gamma0': 'all',
    # Clamped left/right edges of the plate strip.
    'Left': ('vertices in (x < 0.001)', 'facet'),
    'Right': ('vertices in (x > 0.299)', 'facet'),
}

fields = {
    # All fields are complex-valued and live on the interface Gamma0.
    'deflection': ('complex', 'scalar', 'Gamma0', 1),  # plate deflection w
    'rotation': ('complex', 'vector', 'Gamma0', 1),    # rotation theta
    'tvelocity': ('complex', 'scalar', 'Gamma0', 1),   # transversal velocity g0
}

variables = {
    # (unknown, test) pairs: w/z deflection, theta/nu rotation, g0/f0 velocity.
    'w': ('unknown field', 'deflection'),
    'z': ('test field', 'deflection', 'w'),
    'theta': ('unknown field', 'rotation'),
    'nu': ('test field', 'rotation', 'theta'),
    'g0': ('unknown field', 'tvelocity'),
    'f0': ('test field', 'tvelocity', 'g0'),
}

ebcs = {
    # Clamp deflection and rotation on both plate edges.
    'fixed_l': ('Left', {'w.0': 0.0, 'theta.all': 0.0}),
    'fixed_r': ('Right', {'w.0': 0.0, 'theta.all': 0.0}),
}

options = {
}

materials = {
    # Homogenized interface/plate coefficients. 'h'-prefixed entries are
    # scaled by the plate thickness h, 'h3'-prefixed by h^3/3.
    'ac' : ({'c': -1.064e+00, 'T': 9.202e-01,
             'hG': thickness * 4.5e10 * nm.eye(2),
             'hR': thickness * 0.71,
             'h3R': thickness**3 / 3.0 * 0.71,
             'h3C': thickness**3 / 3.0 * stiffness_from_lame(2, 1e1, 1e0)}, ),
}

equations = {
    # Plate equation for deflection w (test function z); %e slots are filled
    # with the angular-frequency powers computed above.
    'eq_3': """
%e * dw_dot.5.Gamma0(ac.c, z, g0)
- %e * dw_dot.5.Gamma0(ac.T, z, w)
- %e * dw_dot.5.Gamma0(ac.hR, z, w)
+ dw_diffusion.5.Gamma0(ac.hG, z, w)
- dw_v_dot_grad_s.5.Gamma0(ac.hG, theta, z)
= 0"""\
    % (w2, w2, w2),
    # Rotation equation for theta (test function nu).
    'eq_4': """
- %e * dw_dot.5.Gamma0(ac.h3R, nu, theta)
+ dw_lin_elastic.5.Gamma0(ac.h3C, nu, theta)
- dw_v_dot_grad_s.5.Gamma0(ac.hG, nu, w)
+ dw_dot.5.Gamma0(ac.hG, nu, theta)
= 0"""\
    % (w2, ),
}

solvers = {
    # Direct sparse linear solver; a single Newton step suffices since the
    # problem is linear.
    'ls' : ('ls.scipy_direct', {}),
    'newton' : ('nls.newton', {
        'i_max' : 1,
        'eps_a' : 1e-4,
        'eps_r' : 1e-4,
    })
}
|
vlukes/sfepy
|
examples/acoustics/vibro_acoustic3d_mid.py
|
Python
|
bsd-3-clause
| 3,492
|
[
"VTK"
] |
d5e485b1338b63fb37ce20ccf0fb4eb517eee4c5c5634608f7e07e95eeb99fa4
|
from __future__ import print_function
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
class NeuronTest(_DataTest):
    """Tests for Neuron: properties, relationships, and how staged
    (in-memory) data differs from saved and cross-context data."""

    # Presumably consumed by _DataTest to build the contextualized classes
    # exposed via self.ctx — confirm in DataTestTemplate.
    ctx_classes = (Neuron, Connection)

    def setUp(self):
        _DataTest.setUp(self)
        # Shorthand: create a context-bound Neuron with the given name.
        self.neur = lambda x: self.ctx.Neuron(name=x)

    def test_Cell(self):
        # A Neuron is a kind of Cell.
        do = self.neur('BDUL')
        self.assertTrue(isinstance(do, Cell))

    def test_receptors(self):
        n = self.neur('AVAL')
        n.receptor('GLR-2')
        self.save()
        self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))

    def test_same_name_same_id(self):
        """
        Test that two Neuron objects with the same name have the same
        identifier. Saves us from having too many inserts of the same object.
        """
        c = Neuron(name="boots")
        c1 = Neuron(name="boots")
        self.assertEqual(c.identifier, c1.identifier)

    def test_type(self):
        n = self.neur('AVAL')
        n.type('interneuron')
        self.save()
        self.assertEqual('interneuron', self.neur('AVAL').type.one())

    def test_name(self):
        """
        Test that the name property is set when the neuron is initialized
        with it
        """
        self.assertEqual('AVAL', self.neur('AVAL').name())
        self.assertEqual('AVAR', self.neur('AVAR').name())

    def test_neighbor(self):
        # neighbor(other) stages a relationship; neighbor() queries the
        # staged/saved neighbors.
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        neighbors = list(n.neighbor())
        self.assertIn(self.neur('PVCL'), neighbors)
        self.save()
        self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))

    def test_neighbor_count(self):
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.save()
        # A blank Neuron acts as a query pattern for counting matches.
        p = self.ctx.Neuron()
        self.neur('AVAL').neighbor(p)
        self.assertEqual(1, p.count())

    def test_neighbor_count_staged(self):
        # Staged (unsaved) neighbors are counted too.
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.assertEqual(1, n.neighbor.count())

    def test_neighbor_count_context_staged(self):
        # Data staged in one context is not visible from another context.
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        ctx1 = Context(ident='http://example.org/ctx1')
        self.assertEqual(0, ctx1(n).neighbor.count())

    def test_connection_count(self):
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.save()
        self.assertEqual(1, self.neur('AVAL').connection.count())

    def test_connection_count_staged(self):
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.assertEqual(1, n.connection.count())

    def test_neighbor_context(self):
        # Staged neighbor relationships do not leak across contexts.
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.neighbor(n1)
        self.assertEqual(set(), set(ctx1(n0).neighbor()))

    def test_connection_get_staged(self):
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(1, len(n0.connection()))

    def test_connection_only_defined(self):
        # A Connection with no properties set does not count as a match.
        n0 = self.ctx.Neuron(name='NEURON0')
        n0.connection(self.ctx.Connection())
        self.assertEqual(0, len(n0.connection()))

    def test_connection_context(self):
        # Staged connections do not leak across contexts.
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(set(), set(ctx1(n0).connection()))

    def test_init_from_lineage_name(self):
        # A neuron saved with a lineage name can be retrieved by it.
        c = self.ctx.Neuron(lineageName="AB plapaaaap", name="ADAL")
        self.save()
        c = self.context.query(Neuron)(lineageName="AB plapaaaap")
        self.assertEqual(c.name(), 'ADAL')
|
gsarma/PyOpenWorm
|
tests/NeuronTest.py
|
Python
|
mit
| 4,199
|
[
"NEURON"
] |
9acf82ce79531a1a6d27a5116366eb4119f73e671ee1b69d09e5d160094a4fe0
|
# coding: utf-8
from __future__ import unicode_literals
"""
This module provides classes to define everything related to band structures.
"""
__author__ = "Geoffroy Hautier, Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "March 14, 2012"
import numpy as np
import math
import itertools
import collections
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.serializers.json_coders import PMGSONable
class Kpoint(PMGSONable):
    """
    Representation of a single k-point tied to a (reciprocal) lattice.

    Coordinates may be supplied either fractionally or in cartesian form,
    with a syntax similar to the site object in pymatgen.core.structure.

    Args:
        coords: coordinates of the kpoint as a numpy array
        lattice: A pymatgen.core.lattice.Lattice object representing
            the reciprocal lattice of the kpoint
        to_unit_cell: If True, fold the fractional coordinates into the
            basic unit cell, i.e., 0 <= a < 1. Defaults to False.
        coords_are_cartesian: Whether ``coords`` are cartesian
            (fractional by default)
        label: optional label of the kpoint (None by default)
    """

    def __init__(self, coords, lattice, to_unit_cell=False,
                 coords_are_cartesian=False, label=None):
        self._lattice = lattice
        self._label = label
        if coords_are_cartesian:
            self._fcoords = lattice.get_fractional_coords(coords)
        else:
            self._fcoords = coords
        if to_unit_cell:
            # Fold each fractional coordinate into [0, 1).
            for idx, fc in enumerate(self._fcoords):
                self._fcoords[idx] = fc - math.floor(fc)
        self._ccoords = lattice.get_cartesian_coords(self._fcoords)

    @property
    def lattice(self):
        """
        The reciprocal lattice of the kpoint as a
        pymatgen.core.lattice.Lattice object.
        """
        return self._lattice

    @property
    def label(self):
        """
        The label associated with the kpoint, or None.
        """
        return self._label

    @property
    def frac_coords(self):
        """
        Fractional coordinates of the kpoint as a (copied) numpy array.
        """
        return np.copy(self._fcoords)

    @property
    def cart_coords(self):
        """
        Cartesian coordinates of the kpoint as a (copied) numpy array.
        """
        return np.copy(self._ccoords)

    @property
    def a(self):
        """
        Fractional a coordinate of the kpoint.
        """
        return self._fcoords[0]

    @property
    def b(self):
        """
        Fractional b coordinate of the kpoint.
        """
        return self._fcoords[1]

    @property
    def c(self):
        """
        Fractional c coordinate of the kpoint.
        """
        return self._fcoords[2]

    def __str__(self):
        """
        Fractional coordinates, cartesian coordinates and label.
        """
        return "{} {} {}".format(self.frac_coords, self.cart_coords,
                                 self.label)

    def as_dict(self):
        """
        Json-serializable dict representation of the kpoint.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "label": self.label,
             "fcoords": list(self.frac_coords),
             "ccoords": list(self.cart_coords)}
        d["lattice"] = self.lattice.as_dict()
        return d
class BandStructure(object):
"""
This is the most generic band structure data possible
it's defined by a list of kpoints + energies for each of them
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up
lattice: The reciprocal lattice as a pymatgen Lattice object.
label_dict: (dict) of {} this link a kpoint (in frac coords or
cartesian coordinates depending on the coords).
coords_are_cartesian: Whether coordinates are cartesian.
efermi: fermi energy
labels_dict: (dict) of {} this links a kpoint (in frac coords or
cartesian coordinates depending on the coords) to a label.
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
projections: dict of orbital projections for spin up and spin down
{Spin.up:[][{Orbital:[]}],Spin.down:[][{Orbital:[]}]. The
format follows the one from eigenvals: The first index of the
array refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. For each band and kpoint, we associate a
dictionary indicating projections on orbitals and on different
sites the keys of the dictionary are Orbital objects and the
values are the projections on each site ordered as in the
structure object. If the band structure is not spin polarized,
we only store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict=None,
             coords_are_cartesian=False, structure=None, projections=None):
    """Build the band structure; see the class docstring for the argument
    semantics."""
    self._efermi = efermi
    self._lattice_rec = lattice
    self._kpoints = []
    self._labels_dict = {}
    self._structure = structure
    self._projections = projections if projections else {}
    if labels_dict is None:
        labels_dict = {}
    # Projections are per-site, so a structure is mandatory with them.
    if len(self._projections) != 0 and self._structure is None:
        raise Exception("if projections are provided a structure object"
                        " needs also to be given")
    for k in kpoints:
        # See if this kpoint has been assigned a label: match against the
        # labelled coordinates within a small tolerance.
        label = None
        for c in labels_dict:
            if np.linalg.norm(k - np.array(labels_dict[c])) < 0.0001:
                label = c
                self._labels_dict[label] = Kpoint(
                    k, lattice, label=label,
                    coords_are_cartesian=coords_are_cartesian)
        self._kpoints.append(
            Kpoint(k, lattice, label=label,
                   coords_are_cartesian=coords_are_cartesian))
    self._bands = eigenvals
    # Number of bands is taken from the Spin.up channel.
    self._nb_bands = len(eigenvals[Spin.up])
    # Two spin channels present => spin-polarized calculation.
    self._is_spin_polarized = False
    if len(self._bands) == 2:
        self._is_spin_polarized = True
@property
def kpoints(self):
    """
    The list of kpoints (as Kpoint objects) in the band structure.
    """
    return self._kpoints

@property
def lattice(self):
    """
    The reciprocal lattice of the band structure as a pymatgen Lattice
    object.
    """
    return self._lattice_rec

@property
def efermi(self):
    """
    The Fermi energy.
    """
    return self._efermi

@property
def is_spin_polarized(self):
    """
    True if the band structure is spin-polarized, False otherwise.
    """
    return self._is_spin_polarized

@property
def bands(self):
    """
    Returns the eigenvalues for each kpoint as a dictionary
    {Spin.up:[][], Spin.down:[][]}, where the first index of the array
    [][] refers to the band and the second to the index of the
    kpoint. The kpoints are ordered according to the order of
    self.kpoints. If the band structure is not spin polarized, we
    only store one data set under Spin.up.
    """
    return self._bands

@property
def nb_bands(self):
    """
    Returns the number of bands in the band structure.
    """
    return self._nb_bands
def get_projection_on_elements(self):
    """
    Method returning a dictionary of projections on elements.

    Returns:
        a dictionary in the {Spin.up:[][{Element:values}],
        Spin.down:[][{Element:values}]} format;
        if there are no projections in the band structure
        returns an empty dict
    """
    if len(self._projections) == 0:
        return {}
    if self.is_spin_polarized:
        result = {Spin.up: [], Spin.down: []}
    else:
        result = {Spin.up: []}
    structure = self._structure
    for spin in result:
        # One defaultdict per (band, kpoint) pair, accumulating the total
        # projection weight per element symbol.
        result[spin] = [[collections.defaultdict(float)
                         for i in range(len(self._kpoints))]
                        for j in range(self._nb_bands)]
        for i, j, k in itertools.product(list(range(self._nb_bands)),
                                         list(range(len(self._kpoints))),
                                         list(range(structure.num_sites))):
            # NOTE(review): orbital keys are taken from the Spin.up channel
            # — assumes both spin channels share the same orbital keys;
            # confirm for spin-polarized projections.
            for orb in self._projections[Spin.up][i][j]:
                result[spin][i][j][str(structure[k].specie)] += \
                    self._projections[spin][i][j][orb][k]
    return result
def get_projections_on_elts_and_orbitals(self, dictio):
    """
    Method returning a dictionary of projections on elements and specific
    orbitals.

    Args:
        dictio: A dictionary of Elements and Orbitals for which we want
            to have projections on. It is given as: {Element:[orbitals]},
            e.g., {'Cu':['d','s']}

    Returns:
        A dictionary of projections on elements in the
        {Spin.up:[][{Element:{orb:values}}],
        Spin.down:[][{Element:{orb:values}}]} format;
        if there are no projections in the band structure returns an empty
        dict.
    """
    if len(self._projections) == 0:
        return {}
    if self.is_spin_polarized:
        result = {Spin.up: [], Spin.down: []}
    else:
        result = {Spin.up: []}
    structure = self._structure
    for spin in result:
        # Nested {element: {orbital-letter: weight}} accumulator per
        # (band, kpoint) pair.
        result[spin] = [[{str(e): collections.defaultdict(float)
                          for e in dictio}
                         for i in range(len(self._kpoints))]
                        for j in range(self._nb_bands)]
        for i, j, k in itertools.product(
                list(range(self._nb_bands)), list(range(len(self._kpoints))),
                list(range(structure.num_sites))):
            # NOTE(review): orbital keys come from the Spin.up channel —
            # assumes both spins share the same orbital keys; confirm.
            for orb in self._projections[Spin.up][i][j]:
                if str(structure[k].specie) in dictio:
                    # Only the first letter of the orbital label (s/p/d/f)
                    # is used to aggregate.
                    if str(orb)[0] in dictio[str(structure[k].specie)]:
                        result[spin][i][j][str(structure[k].specie)]\
                            [str(orb)[0]] += \
                            self._projections[spin][i][j][orb][k]
    return result
def is_metal(self):
    """
    Check if the band structure indicates a metal by looking if the Fermi
    level crosses a band.

    Returns:
        True if a metal, False if not
    """
    spins = [Spin.up, Spin.down] if self.is_spin_polarized else [Spin.up]
    n_kpts = len(self._kpoints)
    for spin in spins:
        for band_idx in range(self._nb_bands):
            eigs = [self._bands[spin][band_idx][kpt] for kpt in range(n_kpts)]
            # A band crosses the Fermi level when it has eigenvalues on
            # both sides of it.
            has_below = any(e < self._efermi for e in eigs)
            has_above = any(e > self._efermi for e in eigs)
            if has_below and has_above:
                return True
    return False
def get_vbm(self):
    """
    Returns data about the VBM.

    Returns:
        dict as {"band_index","kpoint_index","kpoint","energy"}
        - "band_index": A dict with spin keys pointing to a list of the
          indices of the band containing the VBM (please note that you
          can have several bands sharing the VBM) {Spin.up:[],
          Spin.down:[]}
        - "kpoint_index": The list of indices in self._kpoints for the
          kpoint vbm. Please note that there can be several
          kpoint_indices relating to the same kpoint (e.g., Gamma can
          occur at different spots in the band structure line plot)
        - "kpoint": The kpoint (as a kpoint object)
        - "energy": The energy of the VBM
        - "projections": The projections along sites and orbitals of the
          VBM if any projection data is available (else it is an empty
          dictionnary). The format is similar to the projections field in
          BandStructure: {spin:{'Orbital': [proj]}} where the array
          [proj] is ordered according to the sites in structure
    """
    # A metal has no well-defined VBM.
    if self.is_metal():
        return {"band_index": [], "kpoint_index": [],
                "kpoint": [], "energy": None, "projections": {}}
    # Scan all (band, kpoint, spin) eigenvalues below the Fermi level for
    # the largest one.
    max_tmp = -float("inf")
    index = None
    kpointvbm = None
    for i in range(self._nb_bands):
        for j in range(len(self._kpoints)):
            for spin in self._bands:
                if self._bands[spin][i][j] < self._efermi:
                    if self._bands[spin][i][j] > max_tmp:
                        max_tmp = self._bands[spin][i][j]
                        index = j
                        kpointvbm = self._kpoints[j]
    # A labelled kpoint (e.g. Gamma) may occur at several spots in a line
    # plot; collect every index carrying the same label.
    list_ind_kpts = []
    if kpointvbm.label is not None:
        for i in range(len(self._kpoints)):
            if self._kpoints[i].label == kpointvbm.label:
                list_ind_kpts.append(i)
    else:
        list_ind_kpts.append(index)
    # Get all other bands sharing the VBM (within a 1 meV-scale tolerance).
    list_ind_band = {Spin.up: []}
    if self.is_spin_polarized:
        list_ind_band = {Spin.up: [], Spin.down: []}
    for spin in self._bands:
        for i in range(self._nb_bands):
            if math.fabs(self._bands[spin][i][index] - max_tmp) < 0.001:
                list_ind_band[spin].append(i)
    # Projections of the first VBM band at the first VBM kpoint, per spin.
    proj = {}
    if len(self._projections) != 0:
        for spin in list_ind_band:
            if len(list_ind_band[spin]) == 0:
                continue
            proj[spin] = \
                self._projections[spin][list_ind_band[spin][0]][
                    list_ind_kpts[0]]
    return {'band_index': list_ind_band,
            'kpoint_index': list_ind_kpts,
            'kpoint': kpointvbm, 'energy': max_tmp,
            'projections': proj}
def get_cbm(self):
    """
    Returns data about the CBM.

    Returns:
        {"band_index","kpoint_index","kpoint","energy"}
        - "band_index": A dict with spin keys pointing to a list of the
          indices of the band containing the CBM (please note that you
          can have several bands sharing the CBM) {Spin.up:[],
          Spin.down:[]}
        - "kpoint_index": The list of indices in self._kpoints for the
          kpoint cbm. Please note that there can be several
          kpoint_indices relating to the same kpoint (e.g., Gamma can
          occur at different spots in the band structure line plot)
        - "kpoint": The kpoint (as a kpoint object)
        - "energy": The energy of the CBM
        - "projections": The projections along sites and orbitals of the
          CBM if any projection data is available (else it is an empty
          dictionnary). The format is similar to the projections field in
          BandStructure: {spin:{'Orbital': [proj]}} where the array
          [proj] is ordered according to the sites in structure
    """
    # A metal has no well-defined CBM.
    if self.is_metal():
        return {"band_index": [], "kpoint_index": [],
                "kpoint": [], "energy": None, "projections": {}}
    # Scan all eigenvalues above the Fermi level for the smallest one.
    max_tmp = float("inf")
    index = None
    kpointcbm = None
    for spin in self._bands:
        for i in range(self._nb_bands):
            for j in range(len(self._kpoints)):
                if self._bands[spin][i][j] > self._efermi:
                    if self._bands[spin][i][j] < max_tmp:
                        max_tmp = self._bands[spin][i][j]
                        index = j
                        kpointcbm = self._kpoints[j]
    # A labelled kpoint may occur at several spots in a line plot; collect
    # every index carrying the same label.
    list_index_kpoints = []
    if kpointcbm.label is not None:
        for i in range(len(self._kpoints)):
            if self._kpoints[i].label == kpointcbm.label:
                list_index_kpoints.append(i)
    else:
        list_index_kpoints.append(index)
    # Get all other bands sharing the CBM (within a small tolerance).
    list_index_band = {Spin.up: []}
    if self.is_spin_polarized:
        list_index_band = {Spin.up: [], Spin.down: []}
    for spin in self._bands:
        for i in range(self._nb_bands):
            if math.fabs(self._bands[spin][i][index] - max_tmp) < 0.001:
                list_index_band[spin].append(i)
    # Projections of the first CBM band at the first CBM kpoint, per spin.
    proj = {}
    if len(self._projections) != 0:
        for spin in list_index_band:
            if len(list_index_band[spin]) == 0:
                continue
            proj[spin] = self._projections[spin][list_index_band[spin][0]][
                list_index_kpoints[0]]
    return {'band_index': list_index_band,
            'kpoint_index': list_index_kpoints,
            'kpoint': kpointcbm, 'energy': max_tmp,
            'projections': proj}
def get_band_gap(self):
"""
Returns band gap data.
Returns:
A dict {"energy","direct","transition"}:
"energy": band gap energy
"direct": A boolean telling if the gap is direct or not
"transition": kpoint labels of the transition (e.g., "\Gamma-X")
"""
if self.is_metal():
return {"energy": 0.0, "direct": False, "transition": None}
cbm = self.get_cbm()
vbm = self.get_vbm()
result = dict(direct=False, energy=0.0, transition=None)
result["energy"] = cbm["energy"] - vbm["energy"]
if cbm["kpoint"].label == vbm["kpoint"].label or \
np.linalg.norm(cbm["kpoint"].cart_coords
- vbm["kpoint"].cart_coords) < 0.01:
result["direct"] = True
result["transition"] = "-".join(
[str(c.label) if c.label is not None else
str("(") + ",".join(["{0:.3f}".format(c.frac_coords[i])
for i in range(3)])
+ str(")") for c in [vbm["kpoint"], cbm["kpoint"]]])
return result
def get_direct_band_gap(self):
"""
Returns the direct band gap.
Returns:
the value of the direct band gap
"""
if self.is_metal():
return 0.0
lowest_conduction_band = []
highest_valence_band = []
for j in range(len(self._bands[Spin.up])):
for i in range(len(self.kpoints)):
if self._bands[Spin.up][j][i] > self._efermi:
lowest_conduction_band.append(self._bands[Spin.up][j][i])
highest_valence_band.append(self._bands[Spin.up][j-1][i])
if self.is_spin_polarized:
lowest_conduction_band_d = []
highest_valence_band_d = []
for j in range(len(self._bands[Spin.down])):
for i in range(len(self.kpoints)):
if self._bands[Spin.down][j][i] > self._efermi:
lowest_conduction_band_d.append(self._bands[Spin.down][j][i])
highest_valence_band_d.append(self._bands[Spin.down][j-1][i])
diff = []
for i in range(len(self.kpoints)):
diff.append(min([lowest_conduction_band[i],lowest_conduction_band_d[i]])
- max([highest_valence_band[i],highest_valence_band_d[i]]))
return min(diff)
diff = []
for i in range(len(self.kpoints)):
diff.append(lowest_conduction_band[i] - highest_valence_band[i])
return min(diff)
    def as_dict(self):
        """
        Json-serializable dict representation of BandStructure.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "lattice_rec": self._lattice_rec.as_dict(), "efermi": self._efermi,
             "kpoints": []}
        #kpoints are not kpoint objects dicts but are frac coords (this makes
        #the dict smaller and avoids the repetition of the lattice
        for k in self._kpoints:
            d["kpoints"].append(k.as_dict()["fcoords"])
        # spins are stored by their integer value so the keys are JSON safe
        d["bands"] = {str(int(spin)): self._bands[spin]
                      for spin in self._bands}
        d["is_metal"] = self.is_metal()
        vbm = self.get_vbm()
        d["vbm"] = {"energy": vbm["energy"],
                    "kpoint_index": vbm["kpoint_index"],
                    "band_index": {str(int(spin)): vbm["band_index"][spin]
                                   for spin in vbm["band_index"]},
                    'projections': {str(spin): {str(orb):
                                    vbm['projections'][spin][orb]
                                    for orb in vbm['projections'][spin]}
                                    for spin in vbm['projections']}}
        cbm = self.get_cbm()
        d['cbm'] = {'energy': cbm['energy'],
                    'kpoint_index': cbm['kpoint_index'],
                    'band_index': {str(int(spin)): cbm['band_index'][spin]
                                   for spin in cbm['band_index']},
                    'projections': {str(spin): {str(orb):
                                    cbm['projections'][spin][orb]
                                    for orb in cbm['projections'][spin]}
                                    for spin in cbm['projections']}}
        d['band_gap'] = self.get_band_gap()
        d['labels_dict'] = {}
        d['is_spin_polarized'] = self.is_spin_polarized
        for c in self._labels_dict:
            d['labels_dict'][c] = self._labels_dict[c].as_dict()['fcoords']
        # projections are only serialized when present; the structure is
        # needed alongside them to interpret the per-site values
        d['projections'] = {}
        if len(self._projections) != 0:
            d['structure'] = self._structure.as_dict()
            d['projections'] = {
                str(int(spin)): [
                    [{str(orb): [
                        self._projections[spin][i][j][orb][k]
                        for k in range(len(self._projections[spin][i][j][orb]))]
                      for orb in self._projections[spin][i][j]}
                     for j in range(len(self._projections[spin][i]))]
                    for i in range(len(self._projections[spin]))]
                for spin in self._projections}
        return d
@classmethod
def from_dict(cls, d):
"""
Create from dict.
Args:
A dict with all data for a band structure object.
Returns:
A BandStructure object
"""
labels_dict = d['labels_dict']
projections = {}
structure = None
if 'structure' in d:
structure = Structure.from_dict(d['structure'])
if 'projections' in d and len(d['projections']) != 0:
projections = {
Spin.from_int(int(spin)): [
[{Orbital.from_string(orb): [
d['projections'][spin][i][j][orb][k]
for k in range(len(d['projections'][spin][i][j][orb]))]
for orb in d['projections'][spin][i][j]}
for j in range(len(d['projections'][spin][i]))]
for i in range(len(d['projections'][spin]))]
for spin in d['projections']}
return BandStructure(
d['kpoints'], {Spin.from_int(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
class BandStructureSymmLine(BandStructure, PMGSONable):
    """
    This object stores band structures along selected (symmetry) lines in the
    Brillouin zone. We call the different symmetry lines (ex: \Gamma to Z)
    "branches".

    Args:
        kpoints: list of kpoint as numpy arrays, in frac_coords of the
            given lattice by default
        eigenvals: dict of energies for spin up and spin down
            {Spin.up:[][],Spin.down:[][]}, the first index of the array
            [][] refers to the band and the second to the index of the
            kpoint. The kpoints are ordered according to the order of the
            kpoints array. If the band structure is not spin polarized, we
            only store one data set under Spin.up.
        lattice: The reciprocal lattice.
        efermi: fermi energy
        labels_dict: (dict) mapping label strings to kpoint coordinates
            (in frac coords or cartesian coordinates depending on
            coords_are_cartesian).
        coords_are_cartesian: Whether coordinates are cartesian.
        structure: The crystal structure (as a pymatgen Structure object)
            associated with the band structure. This is needed if we
            provide projections to the band structure.
        projections: dict of orbital projections for spin up and spin down
            {Spin.up:[][{Orbital:[]}],Spin.down:[][{Orbital:[]}]. The
            format follows the one from eigenvals: the first index of the
            array refers to the band and the second to the index of the
            kpoint. The kpoints are ordered according to the order of the
            kpoints array. For each band and kpoint, we associate a
            dictionary indicating projections on orbitals and on different
            sites the keys of the dictionary are Orbital objects and the
            values are the projections on each site ordered as in the
            structure object. If the band structure is not spin polarized,
            we only store one data set under Spin.up.
    """
    def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict,
                 coords_are_cartesian=False, structure=None,
                 projections=None):
        BandStructure.__init__(self, kpoints, eigenvals, lattice, efermi,
                               labels_dict, coords_are_cartesian, structure,
                               projections)
        # cumulative distance along the kpoint path (x axis of a line plot)
        self._distance = []
        # list of {"start_index", "end_index", "name"} dicts, one per branch
        self._branches = []
        one_group = []
        branches_tmp = []
        #get labels and distance for each kpoint
        previous_kpoint = self._kpoints[0]
        previous_distance = 0.0
        previous_label = self._kpoints[0].label
        for i in range(len(self._kpoints)):
            label = self._kpoints[i].label
            # two consecutive labelled kpoints mark a jump between
            # branches: reuse the previous distance so the jump has zero
            # length on the plot
            if label is not None and previous_label is not None:
                self._distance.append(previous_distance)
            else:
                self._distance.append(
                    np.linalg.norm(self._kpoints[i].cart_coords -
                                   previous_kpoint.cart_coords) +
                    previous_distance)
            previous_kpoint = self._kpoints[i]
            previous_distance = self._distance[i]
            if label:
                if previous_label:
                    # close the current branch and start a new one
                    if len(one_group) != 0:
                        branches_tmp.append(one_group)
                    one_group = []
            previous_label = label
            one_group.append(i)
        if len(one_group) != 0:
            branches_tmp.append(one_group)
        for b in branches_tmp:
            self._branches.append({"start_index": b[0], "end_index": b[-1],
                                   "name": (self._kpoints[b[0]].label + "-" +
                                            self._kpoints[b[-1]].label)})
        # spin polarized runs carry two eigenvalue sets (Spin.up/Spin.down)
        self._is_spin_polarized = False
        if len(self._bands) == 2:
            self._is_spin_polarized = True
    def get_equivalent_kpoints(self, index):
        """
        Returns the list of kpoint indices equivalent (meaning they are the
        same frac coords) to the given one.

        Args:
            index: the kpoint index

        Returns:
            a list of equivalent indices

        TODO: now it uses the label we might want to use coordinates instead
        (in case there was a mislabel)
        """
        #if the kpoint has no label it can"t have a repetition along the band
        #structure line object
        if self._kpoints[index].label is None:
            return [index]
        list_index_kpoints = []
        for i in range(len(self._kpoints)):
            if self._kpoints[i].label == self._kpoints[index].label:
                list_index_kpoints.append(i)
        return list_index_kpoints
    def get_branch(self, index):
        """
        Returns in what branch(es) is the kpoint. There can be several
        branches.

        Args:
            index: the kpoint index

        Returns:
            A list of dictionaries [{"name","start_index","end_index","index"}]
            indicating all branches in which the k_point is. It takes into
            account the fact that one kpoint (e.g., \Gamma) can be in several
            branches
        """
        to_return = []
        for i in self.get_equivalent_kpoints(index):
            for b in self._branches:
                if b["start_index"] <= i <= b["end_index"]:
                    to_return.append({"name": b["name"],
                                      "start_index": b["start_index"],
                                      "end_index": b["end_index"],
                                      "index": i})
        return to_return
    def apply_scissor(self, new_band_gap):
        """
        Apply a scissor operator (shift of the CBM) to fit the given band gap.
        If it's a metal. We look for the band crossing the fermi level
        and shift this one up. This will not work all the time for metals!

        Args:
            new_band_gap: the band gap the scissor band structure need to have.

        Returns:
            a BandStructureSymmLine object with the applied scissor shift
        """
        if self.is_metal():
            #moves then the highest index band crossing the fermi level
            #find this band...
            max_index = -1000
            #spin_index = None
            for i in range(self._nb_bands):
                below = False
                above = False
                for j in range(len(self._kpoints)):
                    if self._bands[Spin.up][i][j] < self._efermi:
                        below = True
                    if self._bands[Spin.up][i][j] > self._efermi:
                        above = True
                if above and below:
                    if i > max_index:
                        max_index = i
                        #spin_index = Spin.up
                if self.is_spin_polarized:
                    below = False
                    above = False
                    for j in range(len(self._kpoints)):
                        if self._bands[Spin.down][i][j] < self._efermi:
                            below = True
                        if self._bands[Spin.down][i][j] > self._efermi:
                            above = True
                    if above and below:
                        if i > max_index:
                            max_index = i
                            #spin_index = Spin.down
            old_dict = self.as_dict()
            # NOTE(review): for metals the requested new_band_gap is used
            # directly as the rigid shift applied to all bands with index
            # >= max_index — confirm this matches the intended semantics
            shift = new_band_gap
            for spin in old_dict['bands']:
                for k in range(len(old_dict['bands'][spin])):
                    for v in range(len(old_dict['bands'][spin][k])):
                        if k >= max_index:
                            old_dict['bands'][spin][k][v] = \
                                old_dict['bands'][spin][k][v] + shift
        else:
            # insulator: rigidly shift every state at or above the CBM by
            # the difference between requested and current gaps
            shift = new_band_gap - self.get_band_gap()['energy']
            old_dict = self.as_dict()
            for spin in old_dict['bands']:
                for k in range(len(old_dict['bands'][spin])):
                    for v in range(len(old_dict['bands'][spin][k])):
                        if old_dict['bands'][spin][k][v] >= \
                                old_dict['cbm']['energy']:
                            old_dict['bands'][spin][k][v] = \
                                old_dict['bands'][spin][k][v] + shift
            old_dict['efermi'] = old_dict['efermi'] + shift
        return BandStructureSymmLine.from_dict(old_dict)
    def as_dict(self):
        """
        Json-serializable dict representation of BandStructureSymmLine.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "lattice_rec": self._lattice_rec.as_dict(), "efermi": self._efermi,
             "kpoints": []}
        #kpoints are not kpoint objects dicts but are frac coords (this makes
        #the dict smaller and avoids the repetition of the lattice
        for k in self._kpoints:
            d["kpoints"].append(k.as_dict()["fcoords"])
        d["branches"] = self._branches
        d["bands"] = {str(int(spin)): self._bands[spin]
                      for spin in self._bands}
        d["is_metal"] = self.is_metal()
        vbm = self.get_vbm()
        d["vbm"] = {"energy": vbm["energy"],
                    "kpoint_index": vbm["kpoint_index"],
                    "band_index": {str(int(spin)): vbm["band_index"][spin]
                                   for spin in vbm["band_index"]},
                    'projections': {str(spin): {str(orb):
                                    vbm['projections'][spin][orb]
                                    for orb in vbm['projections'][spin]}
                                    for spin in vbm['projections']}}
        cbm = self.get_cbm()
        d['cbm'] = {'energy': cbm['energy'],
                    'kpoint_index': cbm['kpoint_index'],
                    'band_index': {str(int(spin)): cbm['band_index'][spin]
                                   for spin in cbm['band_index']},
                    'projections': {str(spin): {str(orb):
                                    cbm['projections'][spin][orb]
                                    for orb in cbm['projections'][spin]}
                                    for spin in cbm['projections']}}
        d['band_gap'] = self.get_band_gap()
        d['labels_dict'] = {}
        d['is_spin_polarized'] = self.is_spin_polarized
        # MongoDB does not accept keys starting with $. Add a blank space to fix the problem
        for c in self._labels_dict:
            mongo_key = c if not c.startswith("$") else " " + c
            d['labels_dict'][mongo_key] = self._labels_dict[c].as_dict()['fcoords']
        d['projections'] = {}
        if len(self._projections) != 0:
            d['structure'] = self._structure.as_dict()
            d['projections'] = {
                str(int(spin)): [
                    [{str(orb): [
                        self._projections[spin][i][j][orb][k]
                        for k in range(len(self._projections[spin][i][j][orb]))]
                      for orb in self._projections[spin][i][j]}
                     for j in range(len(self._projections[spin][i]))]
                    for i in range(len(self._projections[spin]))]
                for spin in self._projections}
        return d
    @classmethod
    def from_dict(cls, d):
        """
        Args:
            A dict with all data for a band structure symm line object.

        Returns:
            A BandStructureSymmLine object
        """
        # Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
        labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
        projections = {}
        structure = None
        if 'projections' in d and len(d['projections']) != 0:
            structure = Structure.from_dict(d['structure'])
            projections = {
                Spin.from_int(int(spin)): [
                    [{Orbital.from_string(orb): [
                        d['projections'][spin][i][j][orb][k]
                        for k in range(len(d['projections'][spin][i][j][orb]))]
                      for orb in d['projections'][spin][i][j]}
                     for j in range(len(d['projections'][spin][i]))]
                    for i in range(len(d['projections'][spin]))]
                for spin in d['projections']}
        return BandStructureSymmLine(
            d['kpoints'], {Spin.from_int(int(k)): d['bands'][k]
                           for k in d['bands']},
            Lattice(d['lattice_rec']['matrix']), d['efermi'],
            labels_dict, structure=structure, projections=projections)
def get_reconstructed_band_structure(list_bs, efermi=None):
    """
    This method takes a list of band structures
    and reconstruct one band structure object from all of them
    this is typically very useful when you split non self consistent
    band structure runs in several independent jobs and want to merge back
    the results

    Args:
        list_bs: A list of BandStructure
        efermi: The fermi energy of the reconstructed band structure. If
            None is assigned an average of all the fermi energy in each
            object in the list_bs is used.

    Returns:
        A BandStructure or BandStructureSymmLine object (depending on
        the type of the list_bs objects)
    """
    if efermi is None:
        efermi = sum([b.efermi for b in list_bs]) / len(list_bs)
    kpoints = []
    labels_dict = {}
    rec_lattice = list_bs[0]._lattice_rec
    nb_bands = min([list_bs[i]._nb_bands for i in range(len(list_bs))])
    for bs in list_bs:
        for k in bs._kpoints:
            kpoints.append(k.frac_coords)
        for k, v in bs._labels_dict.items():
            labels_dict[k] = v.frac_coords
    # Copy the per-band eigenvalue lists before extending them. The
    # previous implementation appended straight onto list_bs[0]'s internal
    # lists, silently mutating the first band structure passed in.
    eigenvals = {Spin.up: [list(list_bs[0]._bands[Spin.up][i])
                           for i in range(nb_bands)]}
    for i in range(nb_bands):
        for bs in list_bs[1:]:
            for e in bs._bands[Spin.up][i]:
                eigenvals[Spin.up][i].append(e)
    if list_bs[0].is_spin_polarized:
        eigenvals[Spin.down] = [list(list_bs[0]._bands[Spin.down][i])
                                for i in range(nb_bands)]
        for i in range(nb_bands):
            for bs in list_bs[1:]:
                for e in bs._bands[Spin.down][i]:
                    eigenvals[Spin.down][i].append(e)
    projections = {}
    if len(list_bs[0]._projections) != 0:
        # Same defensive copy for projections: extend() would otherwise
        # grow list_bs[0]._projections in place.
        projections = {Spin.up: [list(list_bs[0]._projections[Spin.up][i])
                                 for i in range(nb_bands)]}
        for i in range(nb_bands):
            for bs in list_bs[1:]:
                projections[Spin.up][i].extend(bs._projections[Spin.up][i])
        if list_bs[0].is_spin_polarized:
            projections[Spin.down] = [
                list(list_bs[0]._projections[Spin.down][i])
                for i in range(nb_bands)]
            for i in range(nb_bands):
                for bs in list_bs[1:]:
                    projections[Spin.down][i].extend(
                        bs._projections[Spin.down][i])
    if isinstance(list_bs[0], BandStructureSymmLine):
        return BandStructureSymmLine(kpoints, eigenvals, rec_lattice,
                                     efermi, labels_dict,
                                     structure=list_bs[0]._structure,
                                     projections=projections)
    else:
        return BandStructure(kpoints, eigenvals, rec_lattice, efermi,
                             labels_dict, structure=list_bs[0]._structure,
                             projections=projections)
|
Dioptas/pymatgen
|
pymatgen/electronic_structure/bandstructure.py
|
Python
|
mit
| 40,975
|
[
"CRYSTAL",
"pymatgen"
] |
9ba5198341b06446db20f336671869a527e1a6e60ba9487fbe6f01868eb180ee
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK, S_ERROR, gLogger
class FilterExecutor(object):
  """Registry of accounting filters, per-id and global, applied in order."""

  ALLKW = "all"

  def __init__(self):
    # id -> list of filter callables; global filters always run first
    self.__filters = {}
    self.__globalFilters = []

  def applyFilters(self, iD, credDict, condDict, groupingList):
    """Run global filters followed by the filters registered for iD.

    Returns the first non-OK filter result, S_ERROR on an exception
    raised by a filter, or S_OK when every filter accepts."""
    applicable = list(self.__globalFilters)
    applicable.extend(self.__filters.get(iD, []))
    for myFilter in applicable:
      try:
        gLogger.info("Applying filter %s for %s" % (myFilter.__name__, iD))
        result = myFilter(credDict, condDict, groupingList)
        if not result["OK"]:
          gLogger.info("Filter %s for %s failed: %s" % (myFilter.__name__, iD, result["Message"]))
          return result
      except Exception:
        gLogger.exception("Exception while applying filter", "%s for %s" % (myFilter.__name__, iD))
        return S_ERROR("Exception while applying filters")
    return S_OK()

  def addFilter(self, iD, myFilter):
    """Register a single filter or a list/tuple of filters for iD."""
    bucket = self.__filters.setdefault(iD, [])
    if isinstance(myFilter, (list, tuple)):
      bucket.extend(myFilter)
    else:
      bucket.append(myFilter)

  def addGlobalFilter(self, myFilter):
    """Register a single filter or a list/tuple of filters for all ids."""
    if isinstance(myFilter, (list, tuple)):
      self.__globalFilters.extend(myFilter)
    else:
      self.__globalFilters.append(myFilter)
|
ic-hep/DIRAC
|
src/DIRAC/AccountingSystem/private/Policies/FilterExecutor.py
|
Python
|
gpl-3.0
| 1,583
|
[
"DIRAC"
] |
3ccf808fd5b0ab1b9c4a5acf32204e44bd0b04a79f0668814cb9c568eba7eef9
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMethylcode(PythonPackage):
    """MethylCoder is a single program that takes bisulfite-treated
    reads and outputs per-base methylation data."""
    homepage = "https://github.com/brentp/methylcode"
    url      = "https://github.com/brentp/methylcode/archive/master.zip"
    version('1.0.0', 'd0ba07c1ab2c74adddd1b23f8e5823e7')
    # MethylCoder only supports python 2.7
    depends_on('python@2.7.0:2.7.999')
    depends_on('py-six')
    depends_on('py-setuptools')
    depends_on('py-numpy')
    depends_on('py-pyparsing')
    depends_on('py-pyfasta')
    depends_on('py-bsddb3')
    # non-python dependency: the bowtie short-read aligner binary
    depends_on('bowtie')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/py-methylcode/package.py
|
Python
|
lgpl-2.1
| 1,842
|
[
"Bowtie"
] |
8b695a8f97568a98786f4cd60fad467b1a00b6f84e1c221b71d91422fa85a0e9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# PyVortex: Vortex Library Python bindings
# Copyright (C) 2009 Advanced Software Production Line, S.L.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
# You may find a copy of the license under this software is released
# at COPYING file. This is LGPL software: you are welcome to develop
# proprietary applications using this library without any royalty or
# fee but returning back any change, improvement or addition in the
# form of source code, project image, documentation patches, etc.
#
# For commercial support on build BEEP enabled solutions contact us:
#
# Postal address:
# Advanced Software Production Line, S.L.
# C/ Antonio Suarez Nº 10,
# Edificio Alius A, Despacho 102
# Alcalá de Henares 28802 (Madrid)
# Spain
#
# Email address:
# info@aspl.es - http://www.aspl.es/vortex
#
# import sys for command line parsing
import sys
import time
# import python vortex binding
import vortex
# import vortex sasl support
import vortex.sasl
# import vortex tls support
import vortex.tls
# import alive support
import vortex.alive
# import common items for reg test
from regtest_common import *
####################
# regression tests #
####################
def test_00_a_check (queue):
a_tuple = queue.pop ()
if not a_tuple:
error ("Found not defined expected tuple, but found: " + a_tuple)
return False
if a_tuple[0] != 2 or a_tuple[1] != 3:
error ("Expected to find differente values but found: " + str (a_tuple[0]) + ", and: " + str (a_tuple[1]))
return False
# get a string
a_string = queue.pop ()
if a_string != "This is an string":
error ("Expected to receive string: 'This is an string', but received: " + a_string)
return False
# get a list
a_list = queue.pop ()
if len (a_list) != 4:
error ("Expected to find list length: " + len (a_list))
return False
return True
def test_00_a():
    """Exercise vortex.AsyncQueue: creation and destruction, fifo
    ordering, massive push/pop and storage of mixed python types."""
    ##########
    # create a queue
    queue = vortex.AsyncQueue ()

    # call to terminate queue
    del queue

    ######### now check data storage
    queue = vortex.AsyncQueue ()

    # push items
    queue.push (1)
    queue.push (2)
    queue.push (3)

    # get items
    value = queue.pop ()
    if value != 1:
        error ("Expected to find 1 but found: " + str(value))
        return False

    value = queue.pop ()
    if value != 2:
        error ("Expected to find 2 but found: " + str(value))
        return False

    value = queue.pop ()
    if value != 3:
        error ("Expected to find 3 but found: " + str(value))
        return False

    # call to unref
    # del queue # queue.unref ()

    ###### now masive add operations
    queue = vortex.AsyncQueue ()

    # add items
    iterator = 0
    while iterator < 1000:
        queue.push (iterator)
        iterator += 1

    # restore items
    iterator = 0
    while iterator < 1000:
        value = queue.pop ()
        if value != iterator:
            # report expected (iterator) vs found (value); the previous
            # message had the two values swapped
            error ("Expected to find: " + str(iterator) + ", but found: " + str(value))
            return False
        iterator += 1

    ##### now add different types of data
    queue = vortex.AsyncQueue ()
    queue.push ((2, 3))
    queue.push ("This is an string")
    queue.push ([1, 2, 3, 4])

    # get a tuple
    if not test_00_a_check (queue):
        return False

    #### now add several different item
    queue = vortex.AsyncQueue ()
    iterator = 0
    while iterator < 1000:
        queue.push ((2, 3))
        queue.push ("This is an string")
        queue.push ([1, 2, 3, 4])

        # next iterator
        iterator += 1

    # now retreive all items
    iterator = 0
    while iterator < 1000:
        # check queue items
        if not test_00_a_check (queue):
            return False

        # next iterator
        iterator += 1
    return True
def test_01():
    """Create a vortex context, initialize it and tear it down."""
    context = vortex.Ctx ()

    info ("init context..")
    if not context.init ():
        error ("Failed to init Vortex context")
        return False

    # initialization worked; release the context again
    info ("finishing context..")
    context.exit ()
    del context
    return True
def test_02():
    """Open a BEEP connection to the regression listener and close it."""
    context = vortex.Ctx ()
    if not context.init ():
        error ("Failed to init Vortex context")
        return False

    # connect with an explicit timeout (microseconds)
    connection = vortex.Connection (context, host, port, timeout = 5000000)
    if not connection.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(connection.status) + ", message: " + connection.error_msg)
        return False

    info ("BEEP connection created to: " + connection.host + ":" + connection.port)

    info ("Now closing the BEEP session..")
    connection.close ()

    context.exit ()
    del context
    return True
# test connection shutdown before close.
def test_03 ():
    """Check that shutting down a connection before closing it is handled."""
    context = vortex.Ctx ()
    if not context.init ():
        error ("Failed to init Vortex context")
        return False

    connection = vortex.Connection (context, host, port)
    if not connection.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(connection.status) + ", message: " + connection.error_msg)
        return False

    # abruptly drop the transport, then run the normal close path on the
    # already-dead connection
    connection.shutdown ()
    connection.close ()

    context.exit ()
    del context
    return True
# test connection data storage via set_data/get_data.
def test_03_a ():
    """Check storing and retrieving arbitrary python values (including a
    second connection) on a vortex.Connection via set_data/get_data."""
    # call to initialize a context
    ctx = vortex.Ctx ()

    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False

    # call to create a connection
    conn = vortex.Connection (ctx, host, port)

    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False

    # set some data
    conn.set_data ('value', 1)
    conn.set_data ('value2', 2)
    conn.set_data ('boolean', True)

    # now set a connection to also check it is released
    conn2 = vortex.Connection (ctx, host, port)

    # check connection status (report conn2's own status and message;
    # previous code printed conn's instead)
    if not conn2.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn2.status) + ", message: " + conn2.error_msg)
        return False

    conn.set_data ('conn', conn2)

    # recover data
    if conn.get_data ('value') != 1:
        error ("Expected to find value == 1 but found: " + str (conn.get_data ('value')))
        return False

    if conn.get_data ('value2') != 2:
        error ("Expected to find value2 == 2 but found: " + str (conn.get_data ('value2')))
        return False

    if not conn.get_data ('boolean'):
        error ("Expected to find boolean == True but found: " + str (conn.get_data ('boolean')))
        return False

    conn3 = conn.get_data ('conn')

    # check conn references; previously a mismatch fell through to
    # return True, hiding the failure
    if conn2.id != conn3.id:
        error ("Expected to find same connection references but found they differs: " + str (conn2.id) + " != " + str (conn3.id))
        return False

    return True
# create a channel
def test_04 ():
    """Open a channel against the regression URI, verify the channel
    lookup helpers and channel accounting, then close channel and session."""
    # call to initialize a context
    ctx = vortex.Ctx ()

    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False

    # call to create a connection
    conn = vortex.Connection (ctx, host, port)

    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False

    # check find by uri method
    channels = conn.find_by_uri (REGRESSION_URI)
    if len (channels) != 0:
        error ("Expected to find 0 channels opened with " + REGRESSION_URI + ", but found: " + str (len (channels)))
        return False

    # now create a channel
    channel = conn.open_channel (0, REGRESSION_URI)
    if not channel:
        error ("Expected to find proper channel creation, but error found:")
        # get first message
        err = conn.pop_channel_error ()
        while err:
            error ("Found error message: " + str (err[0]) + ": " + err[1])
            # next message
            err = conn.pop_channel_error ()
        return False

    # check ready flag
    if not channel.is_ready:
        error ("Expected to find channel flagged as ready..")
        return False

    # check find by uri method
    channels = conn.find_by_uri (REGRESSION_URI)
    if len (channels) != 1:
        error ("Expected to find 1 channels opened with " + REGRESSION_URI + ", but found: " + str (len (channels)))
        return False

    if channels[0].number != channel.number:
        error ("Expected to find equal channel number, but found: " + str (channels[0].number))
        return False

    # report the profile, not the number, on a profile mismatch
    if channels[0].profile != channel.profile:
        error ("Expected to find equal channel profile, but found: " + str (channels[0].profile))
        return False

    # check channel installed. num_channels is used as an attribute in
    # the condition; the previous error messages tried to call it, which
    # crashed exactly when the check failed
    if conn.num_channels != 2:
        error ("Expected to find only two channels installed (administrative BEEP channel 0 and test channel) but found: " + str (conn.num_channels))
        return False

    # now close the channel
    if not channel.close ():
        error ("Expected to find proper channel close operation, but error found: ")
        # get first message
        err = conn.pop_channel_error ()
        while err:
            error ("Found error message: " + str (err[0]) + ": " + err[1])
            # next message
            err = conn.pop_channel_error ()
        return False

    # check channel installed
    if conn.num_channels != 1:
        error ("Expected to find only one channel installed (administrative BEEP channel 0) but found: " + str (conn.num_channels))
        return False

    # now close the connection (already shutted down)
    conn.close ()

    ctx.exit ()

    # finish ctx
    del ctx
    return True
def test_05_received (conn, channel, frame, data):
# push data received
data.push (frame)
return
# create a channel, exchange one message and validate the reply frame
def test_05 ():
    """Send one message over a channel using vortex.queue_reply as frame
    received handler and validate the reply content, type and sizes."""
    # call to initialize a context
    ctx = vortex.Ctx ()

    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False

    # call to create a connection
    conn = vortex.Connection (ctx, host, port)

    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False

    # now create a channel
    channel = conn.open_channel (0, REGRESSION_URI)
    if not channel:
        error ("Expected to find proper channel creation, but error found:")
        # get first message
        err = conn.pop_channel_error ()
        while err:
            error ("Found error message: " + str (err[0]) + ": " + err[1])
            # next message
            err = conn.pop_channel_error ()
        return False

    # configure frame received handler
    queue = vortex.AsyncQueue ()
    channel.set_frame_received (vortex.queue_reply, queue)

    # send a message to test */
    channel.send_msg ("This is a test", 14)

    # wait for the reply
    frame = channel.get_reply (queue)

    # check frame content here
    if frame.payload != "This is a test":
        error ("Received frame content '" + frame.payload + "', but expected: 'This is a test'")
        return False

    # check frame type
    if frame.type != "RPY":
        error ("Expected to receive frame type RPY but found: " + frame.type)
        return False

    # check frame sizes. sizes are integers: convert before
    # concatenating (previous code raised TypeError on mismatch and
    # then fell through without returning False)
    if frame.content_size != 16:
        error ("Expected to find content size equal to 16 but found: " + str (frame.content_size))
        return False

    # check frame sizes
    if frame.payload_size != 14:
        error ("Expected to find payload size equal to 14 but found: " + str (frame.payload_size))
        return False

    # now test to remove frame received
    channel.set_frame_received ()

    # now close the connection (already shutted down)
    conn.close ()

    ctx.exit ()
    return True
def test_06_received (conn, channel, frame, data):
# push frame received
data.push (frame)
def test_06 ():
    """Check serialized delivery: send 100 messages, then verify the 100
    replies arrive in order with the expected content."""
    # call to initialize a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now create a channel
    channel = conn.open_channel (0, REGRESSION_URI)
    # FIX: the original dereferenced channel without checking creation status
    if not channel:
        error ("Expected to find proper channel creation, but error found")
        return False
    # flag the channel to do deliveries in a serial form
    channel.set_serialize = True
    # configure frame received
    queue = vortex.AsyncQueue ()
    channel.set_frame_received (test_06_received, queue)
    # send 100 frames
    iterator = 0
    while iterator < 100:
        message = (";; This buffer is for notes you don't want to save, and for Lisp evaluation.\n"
                   ";; If you want to create a file, visit that file with C-x C-f,\n"
                   ";; then enter the text in that file's own buffer: message num: " + str (iterator))
        channel.send_msg (message, len (message))
        iterator += 1
    # now receive and process all messages, checking ordering
    iterator = 0
    while iterator < 100:
        message = (";; This buffer is for notes you don't want to save, and for Lisp evaluation.\n"
                   ";; If you want to create a file, visit that file with C-x C-f,\n"
                   ";; then enter the text in that file's own buffer: message num: " + str (iterator))
        frame = queue.pop ()
        if frame.payload != message:
            error ("Expected to find message '" + message + "' but found: '" + frame.payload + "'")
            return False
        iterator += 1
    # now check there are no pending message in the queue
    # FIX: queue.items is an integer; concatenating it raised TypeError
    if queue.items != 0:
        error ("Expected to find 0 items in the queue but found: " + str (queue.items))
        return False
    # close connection and finish context
    conn.close ()
    ctx.exit ()
    return True
def test_07 ():
    """Like test_06 but replying synchronously: each message is sent and its
    reply consumed before sending the next one."""
    # call to initialize a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now create a channel
    channel = conn.open_channel (0, REGRESSION_URI)
    # FIX: the original dereferenced channel without checking creation status
    if not channel:
        error ("Expected to find proper channel creation, but error found")
        return False
    # configure frame received
    queue = vortex.AsyncQueue ()
    channel.set_frame_received (test_06_received, queue)
    # send 100 frames and receive each reply before continuing
    iterator = 0
    while iterator < 100:
        message = (";; This buffer is for notes you don't want to save, and for Lisp evaluation.\n"
                   ";; If you want to create a file, visit that file with C-x C-f,\n"
                   ";; then enter the text in that file's own buffer: message num: " + str (iterator))
        channel.send_msg (message, len (message))
        # now get a frame
        frame = queue.pop ()
        if frame.payload != message:
            error ("Expected to find message '" + message + "' but found: '" + frame.payload + "'")
            return False
        iterator += 1
    # now check there are no pending message in the queue
    # FIX: queue.items is an integer; concatenating it raised TypeError
    if queue.items != 0:
        error ("Expected to find 0 items in the queue but found: " + str (queue.items))
        return False
    # close connection and finish context
    conn.close ()
    ctx.exit ()
    return True
def test_08 ():
    """Transfer frames filled with literal backslash-zero sequences over
    REGRESSION_URI_ZERO and verify content and size of each reply."""
    # call to initialize a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now create a channel
    channel = conn.open_channel (0, REGRESSION_URI_ZERO)
    # FIX: the original dereferenced channel without checking creation status
    if not channel:
        error ("Expected to find proper channel creation, but error found")
        return False
    # configure frame received
    queue = vortex.AsyncQueue ()
    channel.set_frame_received (test_06_received, queue)
    # build the content to transfer (raw string: backslash sequences stay literal)
    message = r"\0\0\0\0\0\0\0\0" * 8192
    iterator = 0
    while iterator < 10:
        channel.send_msg (message, len (message))
        iterator += 1
    # now receive content and check
    iterator = 0
    while iterator < 10:
        frame = queue.pop ()
        # check content
        if frame.payload != message:
            error ("Expected to find binary zerored string but found string mismatch")
            return False
        # check content length
        # FIX: frame.payload_size is an integer; concatenating it raised TypeError
        if frame.payload_size != len (message):
            error ("String size mismatch, expected to find: " + str (len (message)) + ", but found: " + str (frame.payload_size))
            return False
        iterator += 1
    # close connection and finish context
    conn.close ()
    ctx.exit ()
    return True
def test_09 ():
    """Open many channels on one connection, exchange a message on each one,
    close them all and verify only the administrative channel remains."""
    # max channels
    test_09_max_channels = 24
    # call to initialize a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # open all channels, all replying over the same queue
    queue = vortex.AsyncQueue ()
    iterator = 0
    channels = []
    while iterator < test_09_max_channels:
        channels.append (conn.open_channel (0, REGRESSION_URI,
                                            frame_received=vortex.queue_reply, frame_received_data=queue))
        iterator += 1
    # send content over all channels
    for channel in channels:
        # check message send status
        if channel.send_msg ("This is a test..", 16) < 0:
            print ("Failed to send message..")
    # pop all messages replies
    for channel in channels:
        frame = channel.get_reply (queue)
        # check content
        if frame.payload != "This is a test..":
            error ("Expected to find 'This is a test' but found: " + frame.payload)
            return False
    # check no pending items are in the queue
    # FIX: queue.items is an integer; concatenating it raised TypeError
    if queue.items != 0:
        error ("Expected to find 0 items on the queue, but found: " + str (queue.items))
        return False
    # now close all channels
    for channel in channels:
        if not channel.close ():
            error ("Expected to close channel opened previously, but found an error..")
            return False
    # check channels opened on the connection
    # FIX: message updated to match the check (only channel 0 must remain)
    if conn.num_channels != 1:
        error ("Expected to find only one channel installed (administrative BEEP channel 0) but found: " + str (conn.num_channels))
        return False
    # close connection and finish context
    conn.close ()
    ctx.exit ()
    return True
def test_10 ():
    """Check channel start denial handling: both deny profiles must fail with
    the expected BEEP error code and leave no extra queued errors."""
    # call to initialize a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # open a channel (profile the listener always denies)
    channel = conn.open_channel (0, REGRESSION_URI_DENY)
    if channel:
        error ("Expected to find channel error but found a proper channel reference (1)")
        return False
    # check errors here
    err = conn.pop_channel_error ()
    if err[0] != 554:
        error ("Expected to find error code 554 but found: " + str (err[0]))
        return False
    # check for no more pending errors
    # FIX: err is a tuple; concatenating it to a str raised TypeError
    err = conn.pop_channel_error ()
    if err:
        error ("Expected to find None (no error) but found: " + str (err))
        return False
    # open a channel (DENY with a supported profile)
    channel = conn.open_channel (0, REGRESSION_URI_DENY_SUPPORTED)
    if channel:
        error ("Expected to find channel error but found a proper channel reference (2)")
        return False
    # check errors here
    err = conn.pop_channel_error ()
    if err[0] != 421:
        error ("Expected to find error code 421 but found: " + str (err[0]))
        return False
    # check for no more pending errors (same tuple fix as above)
    err = conn.pop_channel_error ()
    if err:
        error ("Expected to find None (no error) but found: " + str (err))
        return False
    # close connection and finish context
    conn.close ()
    ctx.exit ()
    return True
def test_10_a ():
    """Check one-to-many exchange: one MSG answered by 10 ANS frames plus a
    terminating NUL frame over REGRESSION_URI_ANS."""
    # call to initialize a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # open a channel
    queue = vortex.AsyncQueue ()
    channel = conn.open_channel (0, REGRESSION_URI_ANS,
                                 frame_received=vortex.queue_reply, frame_received_data=queue)
    if not channel:
        error ("Expected to find channel error but found a proper channel reference")
        return False
    # enable serialization
    channel.set_serialize = True
    # send a message to receive all the content
    channel.send_msg ("give da content", 15)
    # wait for all replies
    iterator = 0
    while iterator < 10:
        # get frame
        frame = channel.get_reply (queue)
        if not frame:
            print ("ERROR: expected to not receive None")
            # check connection
            if not conn.is_ok ():
                print ("ERROR: found connection closed (not working)")
            return False
        # check frame type
        if frame.type != "ANS":
            error ("Expected to receive frame type ANS but received: " + frame.type)
            return False
        # check frame size
        # FIX: expected/received values were swapped in the error message
        if frame.payload_size != len (TEST_REGRESSION_URI_4_MESSAGE):
            error ("Expected to receive " + str (len (TEST_REGRESSION_URI_4_MESSAGE)) + " bytes but received: " + str (frame.payload_size))
            return False
        # check frame content (FIX: expected/received also swapped here)
        if frame.payload != TEST_REGRESSION_URI_4_MESSAGE:
            error ("Expected to receive content: " + TEST_REGRESSION_URI_4_MESSAGE + " but received: " + frame.payload)
            return False
        # next message
        iterator += 1
    # now check for last ans
    frame = channel.get_reply (queue)
    # check frame type
    if frame.type != "NUL":
        error ("Expected to receive frame type NUL but received: " + frame.type)
        return False
    # check frame size
    if frame.payload_size != 0:
        error ("Expected to receive 0 bytes but received: " + str (frame.payload_size))
        return False
    # NOTE(review): conn/ctx are left open here, unlike sibling tests — confirm intentional
    return True
def test_10_b_received (conn, channel, frame, data):
    """Frame-received handler: hand conn, frame and channel back to the main thread."""
    info ("Test 10-b: Notification received..")
    for item in (conn, frame, channel):
        data.push (item)
def test_10_b_create_connection_and_send_content (ctx, queue):
    """Create a connection and channel, send one message and return the
    connection (False on failure); replies arrive via test_10_b_received."""
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    channel = conn.open_channel (0, REGRESSION_URI)
    if not channel:
        error ("Expected to find channel error but found a proper channel reference")
        return False
    # route replies through the queue
    channel.set_frame_received (test_10_b_received, queue)
    # send content
    channel.send_msg ("This is a test", 14)
    # channel.incref ()
    info ("Content sent, now wait for replies..")
    return conn
def test_10_b ():
    """Check that connection/channel references created inside a helper stay
    alive long enough for a reply delivered through a frame handler."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    info ("Creating queue, connection, channel and sending content..")
    queue = vortex.AsyncQueue ()
    # PART 1: check channel.incref
    info ("PART 1: check channel.incref ()")
    conn2 = test_10_b_create_connection_and_send_content (ctx, queue)
    if not conn2:
        error ("Failed to initialize connection, channel or content to be sent")
        return False
    # now get reply
    info ("Waiting for replies....")
    conn = queue.pop ()
    frame = queue.pop ()
    channel = queue.pop ()
    info ("Received content.....")
    # check connection status
    if not conn.is_ok ():
        error ("Expected to find connection status ok, but found a failure: " + conn.status_msg)
        return False
    # check frame type and content
    if not frame.type == "RPY":
        error ("Expected to find frame type RPY but found: " + frame.type)
        return False
    if not frame.payload == "This is a test":
        error ("Expected to find frame content 'This is a test' but found: " + frame.payload)
        # FIX: this failure did not abort the test (missing return)
        return False
    # decrement reference counting
    # channel.decref ()
    conn.close ()
    return True
def test_10_c_on_channel (number, channel, conn, queue):
    """Async channel-start handler: push the channel reference to the main thread."""
    info ("Received async channel notification, number: " + str (number) )
    queue.push (channel)
def test_10_c ():
    """Check async (non-blocking) channel creation notification and use the
    resulting channel for ten send/reply round trips."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    queue = vortex.AsyncQueue ()
    # create connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find connection status ok, but found a failure: " + conn.status_msg)
        return False
    # request the channel without waiting; the handler pushes it back
    conn.open_channel (0, REGRESSION_URI, on_channel=test_10_c_on_channel, on_channel_data=queue)
    channel = queue.pop ()
    info ("Channel received in main thread: " + str (channel.number))
    # exchange ten messages over the channel
    message = "This is a test message after async channel notification"
    channel.set_frame_received (vortex.queue_reply, queue)
    for _ in range (10):
        channel.send_msg (message, len (message))
        frame = channel.get_reply (queue)
        if not frame:
            error ("Expected to find frame reply but found None reference")
            return False
        if frame.payload != message:
            error ("Expected to receive different message but found: " + frame.payload + ", rather: " + message)
            return False
    # NOTE: the test do not close conn or channe (this is intentional)
    return True
def test_10_d ():
    """Check async channel-start failure (None pushed by the handler), then
    verify the connection still works for a regular channel."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    queue = vortex.AsyncQueue ()
    # create connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find connection status ok, but found a failure: " + conn.status_msg)
        return False
    # request a channel the listener will deny, without waiting
    conn.open_channel (0, REGRESSION_URI_DENY_SUPPORTED, on_channel=test_10_c_on_channel, on_channel_data=queue)
    channel = queue.pop ()
    if channel:
        error ("Expected to find None value at channel reference..")
        return False
    # the connection must survive the denied start
    if not conn.is_ok ():
        error ("Expected to find connection properly created..")
        return False
    channel = conn.open_channel (0, REGRESSION_URI)
    if not channel:
        error ("Expected to find proper channel..")
        return False
    # exchange ten messages over the working channel
    channel.set_frame_received (vortex.queue_reply, queue)
    message = "This is a test at channel error expected.."
    for _ in range (10):
        channel.send_msg (message, len (message))
        frame = channel.get_reply (queue)
        if not frame:
            error ("Expected to find frame reply but found None reference")
            return False
        if frame.payload != message:
            error ("Expected to receive different message but found: " + frame.payload + ", rather: " + message)
            return False
    # NOTE: the test do not close conn or channe (this is intentional)
    return True
def queue_reply (conn, channel, frame, data):
    """Generic frame-received handler: queue the frame on *data*."""
    data.push (frame)
def create_channel_and_send (conn, queue):
    """Open a REGRESSION_URI channel on *conn*, route replies to *queue* and
    send a single test message."""
    channel = conn.open_channel (0, REGRESSION_URI)
    channel.set_frame_received (queue_reply, queue)
    channel.send_msg ("This is a test", -1)
def test_10_e ():
    """Check that a channel created inside a helper still delivers its reply
    after the helper returns."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    queue = vortex.AsyncQueue ()
    # create connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find connection status ok, but found a failure: " + conn.status_msg)
        return False
    # create channel and send content inside the helper
    create_channel_and_send (conn, queue)
    # wait for response
    frame = queue.pop ()
    if frame.payload != "This is a test":
        error ("Expected to find '%s' but found '%s'" % ("This is a test", frame.payload))
        return False
    info ("Found expected content!")
    # NOTE: the test do not close conn or channe (this is intentional)
    return True
def test_10_f ():
    """Create several connections whose channels ask the listener to close the
    connection, leaving cleanup to reference handling."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    queue = vortex.AsyncQueue ()
    # create five connections, each requesting a remote close
    for _ in range (5):
        conn = vortex.Connection (ctx, host, port)
        if not conn.is_ok ():
            error ("Expected to find connection status ok, but found a failure: " + conn.status_msg)
            return False
        channel = conn.open_channel (0, REGRESSION_URI)
        channel.set_frame_received (queue_reply, queue)
        channel.send_msg ("<close-connection>", -1)
    info ("Found expected content!")
    # NOTE: the test do not close conn or channe (this is intentional)
    return True
def test_11 ():
    """Check connection/listener roles and that listener objects reject
    client-only operations."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # outbound connection: its role must be initiator
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    if conn.role != "initiator":
        error ("Expected to find 'initiator' as connection role, but found: " + conn.role)
        return False
    conn.close ()
    # start a local listener on an ephemeral port
    listener = vortex.create_listener (ctx, "0.0.0.0", "0")
    if not listener.is_ok ():
        error ("Expected to find proper listener creation, but a failure found: " + listener.error_msg)
        return False
    # client-only operations must return nothing on a listener
    if listener.pop_channel_error ():
        error ("Expected to find None value returned from a method not available for listeners")
        return False
    channel = listener.open_channel (0, REGRESSION_URI)
    if channel:
        error ("Expected to find channel error but found a proper channel reference")
        return False
    # connect to the listener just started
    conn = vortex.Connection (ctx, listener.host, listener.port)
    if not conn.is_ok ():
        error ("Expected to find proper connection to local listener")
        return False
    # call to shutdown
    listener.shutdown ()
    return True
def test_12_on_close_a (conn, queue2):
    """First on-close handler: report value 1, then hold the thread briefly."""
    reply_queue = queue2.pop ()
    reply_queue.push (1)
    time.sleep (2)
def test_12_on_close_b (conn, queue2):
    """Second on-close handler: report value 2, then hold the thread briefly."""
    reply_queue = queue2.pop ()
    reply_queue.push (2)
    time.sleep (2)
def test_12_on_close_c (conn, queue2):
    """Third on-close handler: report value 3, then hold the thread briefly."""
    reply_queue = queue2.pop ()
    reply_queue.push (3)
    time.sleep (2)
def test_12():
    """Check connection on-close handlers fire in registration order, both with
    three distinct handlers and with the same handler registered three times."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # queue2 hands each handler the queue it must reply on
    queue = vortex.AsyncQueue ()
    queue2 = vortex.AsyncQueue ()
    queue2.push (queue)
    # configure on close (FIX: removed unused local queue_list)
    conn.set_on_close (test_12_on_close_a, queue2)
    conn.set_on_close (test_12_on_close_b, queue2)
    conn.set_on_close (test_12_on_close_c, queue2)
    # now shutdown and expect the handlers to reply 1, 2, 3 in order
    conn.shutdown ()
    value = queue.pop ()
    if value != 1:
        error ("Test 12: Expected to find 1 but found (0001): " + str (value))
        return False
    queue2.push (queue)
    value = queue.pop ()
    if value != 2:
        error ("Expected to find 2 but found (0002): " + str (value))
        return False
    queue2.push (queue)
    value = queue.pop ()
    if value != 3:
        error ("Expected to find 3 but found (0003): " + str (value))
        return False
    # re-connect
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    queue2.push (queue)
    # same handler registered three times: three notifications of value 1
    conn.set_on_close (test_12_on_close_a, queue2)
    conn.set_on_close (test_12_on_close_a, queue2)
    conn.set_on_close (test_12_on_close_a, queue2)
    # now shutdown
    conn.shutdown ()
    value = queue.pop ()
    if value != 1:
        error ("Expected to find 1 but found (0004): " + str (value))
        return False
    queue2.push (queue)
    value = queue.pop ()
    if value != 1:
        error ("Expected to find 1 but found (0005): " + str (value))
        return False
    queue2.push (queue)
    value = queue.pop ()
    if value != 1:
        error ("Expected to find 1 but found (0006): " + str (value))
        return False
    return True
def test_12_a_closed (conn, queue):
queue.push (3)
def test_12_a ():
    """Check on-close notification when the listener closes the connection
    during channel start (repeated 10 times)."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    iterator = 10
    while iterator > 0:
        # call to create a connection
        conn = vortex.Connection (ctx, host, port)
        if not conn.is_ok ():
            error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
            return False
        # create a queue and configure on close
        queue = vortex.AsyncQueue ()
        conn.set_on_close (test_12_a_closed, queue)
        # start a channel that will be closed by listener
        channel = conn.open_channel (0, REGRESSION_URI_START_CLOSE)
        if channel:
            error ("Expected to find channel error creation, but found proper reference")
            return False
        # check value from queue
        value = queue.pop ()
        if value != 3:
            # FIX: error message was missing the ': ' separator
            error ("Expected to find 3 but found: " + str (value))
            return False
        # reduce iterator
        iterator -= 1
    return True
def test_12_b_closed (conn, queue):
queue.push (3)
def test_12_b ():
    """Check on-close notification when a second connection asks the listener
    to close a previously recorded connection (repeated 3 times)."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    iterator = 3
    while iterator > 0:
        # call to create a connection
        info ("registering connection to be closed...")
        conn = vortex.Connection (ctx, host, port)
        if not conn.is_ok ():
            error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
            return False
        # create a queue and configure on close
        queue = vortex.AsyncQueue ()
        conn.set_on_close (test_12_b_closed, queue)
        # start a channel to notify the connection to shutdown on next start
        channel = conn.open_channel (0, REGRESSION_URI_RECORD_CONN)
        if not channel:
            error ("(1) Expected proper channel creation..")
            return False
        # now create a second connection and start a channel that will fail
        # and will also close the previously recorded connection
        info ("creating second connection...")
        conn2 = vortex.Connection (ctx, host, port)
        if not conn2.is_ok ():
            error ("Expected proper second connection creation..")
            return False
        # start a channel that will be closed by listener
        info ("opening second channel......")
        channel = conn2.open_channel (0, REGRESSION_URI_CLOSE_RECORDED_CONN)
        if channel:
            error ("Expected to find channel error creation, but found proper reference")
            return False
        # check value from queue
        info ("test 12-b: checking value from the queue..")
        value = queue.pop ()
        if value != 3:
            # FIX: error message was missing the ': ' separator
            error ("Expected to find 3 but found: " + str (value))
            return False
        # reduce iterator
        iterator -= 1
    return True
def test_12_c_conn_closed (conn, queue):
    """On-close handler: hand the closed connection reference to the main thread."""
    info ("Received connection close, pushing reference to main thread")
    queue.push (conn)
def test_12_c_on_channel (number, channel, conn, data):
    """Async channel-start handler: the failure is expected, only log it."""
    info ("Received expected channel start failure..")
def test_12_c_create_conn (ctx, queue):
    """Create a connection whose close is reported on *queue*, then request a
    channel the listener closes the connection on (async, no wait)."""
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected proper connection created..")
        return False
    # notify the main thread when the connection closes
    conn.set_on_close (test_12_c_conn_closed, queue)
    # request the channel that triggers the remote close
    conn.open_channel (0, REGRESSION_URI_START_CLOSE, on_channel=test_12_c_on_channel)
    info ("Finished connection and channel start requests..")
def test_12_c ():
    """Check a connection reference received via on-close stays valid after the
    creating function has returned."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    queue = vortex.AsyncQueue ()
    # create the connection inside a function that returns immediately
    test_12_c_create_conn (ctx, queue)
    info ("receiving connection from connection close..")
    conn = queue.pop ()
    info ("Waiting two seconds..")
    time.sleep (2)
    # the reference must still carry a valid connection id
    if conn.id == -1:
        error ("Error, expected to find valid connection id identifier")
        return False
    info ("Ok, received connection reference with id: " + str (conn.id))
    return True
def test_12_d_on_close (conn, queue):
    """On-close handler: report the closed connection back to the main thread."""
    info ("Received connection close with id: " + str (conn.id))
    queue.push (conn)
def test_12_d_frame_received (conn, channel, frame, queue):
    """Frame-received handler: install the on-close notifier on the listener side."""
    info ("Received frame received, setting on close notification..")
    conn.set_on_close (test_12_d_on_close, queue)
def test_12_d ():
    """Check on-close notification set from a listener-side frame handler: shut
    down the client connection and expect the listener copy on the queue."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # create a listener on an ephemeral local port
    listener = vortex.create_listener (ctx, "127.0.0.1", "0")
    if not listener.is_ok ():
        error ("Expected to find proper listener creation but found a failure..")
        return False
    queue = vortex.AsyncQueue ()
    vortex.register_profile (ctx, "urn:beep:aspl.es:profiles:test_12_d",
                             frame_received=test_12_d_frame_received,
                             frame_received_data=queue)
    # ok, now create a connection to this listener
    conn = vortex.Connection (ctx, "127.0.0.1", listener.port)
    if not conn.is_ok ():
        error ("Expected proper connection create but failure found..")
        return False
    # ok, now create a channel and send a message
    channel = conn.open_channel (0, "urn:beep:aspl.es:profiles:test_12_d")
    if not channel:
        error ("Expected proper channel creation..")
        return False
    # send a message to record the channel
    channel.send_msg ("this is a test", 14)
    # FIX: message said 2 seconds but the code sleeps 1; also dropped stray ';'
    info ("Waiting 1 second to close connection..")
    time.sleep (1)
    conn.shutdown ()
    info ("Waiting connection from queue")
    conn2 = queue.pop ()
    if conn2.id == -1 or conn.id == -1:
        error ("Expected to connection id values different from -1 but found: " + str (conn.id) + " != " + str (conn2.id))
        return False
    info ("connection matches..")
    return True
def test_12_failing (queue, data):
    """Sentinel on-close handler: must never run; abort the whole test run if it does."""
    import sys
    error ("ERROR: connection close handler should not be received")
    sys.exit (-1)
def test_12_e ():
    """Check remove_on_close: a removed handler must not fire on shutdown."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # install the failing handler, then remove it again
    close_id = conn.set_on_close (test_12_failing)
    if not conn.remove_on_close (close_id):
        error ("Expected proper status (True) after removing on close handler..")
        return False
    info ("removed on close handler..")
    # close connection
    conn.shutdown ()
    info ("connection shutted down, waiting to close connection..")
    # short grace period: the removed handler would sys.exit if it still fired
    queue = vortex.AsyncQueue ()
    queue.timedpop (200000)
    return True
def test_13():
    """Check listener creation, port-reuse failure and listener role, closing
    and recreating the listener four times."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    iterator = 0
    while iterator < 4:
        # create a listener
        info ("Test 13: creating listener iterator=%d" % iterator)
        listener = vortex.create_listener (ctx, "0.0.0.0", "0")
        if not listener.is_ok ():
            error ("Expected to find proper listener creation, but found error: " + listener.error_msg)
            return False
        # a second listener reusing the same port must fail
        listener2 = vortex.create_listener (ctx, "0.0.0.0", listener.port)
        if listener2.is_ok ():
            error ("Expected to find failure while creating a second listener reusing a port: " + listener2.error_msg)
            return False
        # check the role even knowning it is not working
        info ("Test 13: checking role for listener, iterator=%d" % iterator)
        if listener.role != "master-listener":
            # FIX: the message reported listener2.role while the check is on listener.role
            error ("Expected to find master-listener role but found: " + listener.role)
            return False
        # close listener2
        listener2.close ()
        # the first listener must still be working
        if not listener.is_ok ():
            error ("Expected to find proper listener creation, but found error: " + listener.error_msg)
            return False
        # close the listener
        listener.close ()
        iterator += 1
    return True
def test_14():
    """Check SASL PLAIN: a successful authentication followed by a failing one
    with wrong credentials."""
    # create a context
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # initialize SASL support
    if not vortex.sasl.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # authenticate with valid credentials using the plain profile
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="plain", auth_id="bob", password="secret")
    if status != 2:
        error ("Expected to find VortexOk status code, but found: " + str (status) + ", error message was: " + message)
        return False
    if not vortex.sasl.is_authenticated (conn):
        error ("Expected to find is authenticated status but found un-authenticated connection")
        return False
    if "http://iana.org/beep/SASL/PLAIN" != vortex.sasl.method_used (conn):
        error ("Expected to find method used: http://iana.org/beep/SASL/PLAIN, but found: " + vortex.sasl.method_used (conn))
        return False
    if vortex.sasl.auth_id (conn) != "bob":
        error ("Expected to find auth id bob but found: " + vortex.sasl.auth_id (conn))
        return False
    conn.close ()
    # retry with a wrong password: authentication must fail
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="plain", auth_id="bob", password="secret1")
    if status != 1:
        # NOTE(review): non-fatal check (same pattern in test_15) — confirm intentional
        error ("Expected to find status 1 but found: " + str (status))
    if vortex.sasl.is_authenticated (conn):
        error ("Expected to not find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.method_used (conn):
        error ("Expected to find none method used but found: " + vortex.sasl.method_used (conn))
        return False
    if vortex.sasl.auth_id (conn):
        error ("Expected to find none auth id but found something defined: " + vortex.sasl.auth_id (conn))
        return False
    return True
def test_15():
    """Check SASL ANONYMOUS support: a valid anonymous token must
    authenticate, an invalid one must be rejected and leave the
    connection un-authenticated. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now authenticate connection
    if not vortex.sasl.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # do an auth operation using anonymous profile
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="anonymous", anonymous_token="test@aspl.es")
    # check for VortexOk status (2 == VortexOk)
    if status != 2:
        error ("Expected to find VortexOk status code, but found: " + str (status) + ", error message was: " + message)
        return False
    # check authentication status
    if not vortex.sasl.is_authenticated (conn):
        error ("Expected to find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.ANONYMOUS != vortex.sasl.method_used (conn):
        error ("Expected to find method used: http://iana.org/beep/SASL/ANONYMOUS, but found: " + vortex.sasl.method_used (conn))
        return False
    # close connection
    conn.close ()
    # do a SASL ANONYMOUS try with wrong credentials
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # do an auth operation using anonymous profile (wrong token)
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="anonymous", anonymous_token="wrong@aspl.es")
    # NOTE(review): no return False after this error, the remaining checks
    # still validate the un-authenticated state -- confirm intended
    if status != 1:
        error ("Expected to find status 1 but found: " + str (status))
    # check authentication status
    if vortex.sasl.is_authenticated (conn):
        error ("Expected to not find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.method_used (conn):
        error ("Expected to find none method used but found: " + vortex.sasl.method_used (conn))
        return False
    return True
def test_16():
    """Check SASL DIGEST-MD5 support: authenticate with valid credentials
    (realm aspl.es), then verify a wrong password is rejected, checking
    vortex.sasl state accessors in both cases. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now authenticate connection
    if not vortex.sasl.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # do an auth operation using DIGEST-MD5 profile
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="digest-md5", auth_id="bob", password="secret", realm="aspl.es")
    # check for VortexOk status (2 == VortexOk)
    if status != 2:
        error ("Expected to find VortexOk status code, but found: " + str (status) + ", error message was: " + message)
        return False
    # check authentication status
    if not vortex.sasl.is_authenticated (conn):
        error ("Expected to find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.DIGEST_MD5 != vortex.sasl.method_used (conn):
        error ("Expected to find method used: " + vortex.sasl.DIGEST_MD5 + ", but found: " + vortex.sasl.method_used (conn))
        return False
    # check auth id
    if vortex.sasl.auth_id (conn) != "bob":
        error ("Expected to find auth id bob but found: " + vortex.sasl.auth_id (conn))
        return False
    # close connection
    conn.close ()
    # do a SASL DIGEST-MD5 try with wrong credentials
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # do an auth operation using DIGEST-MD5 profile (wrong password)
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="digest-md5", auth_id="bob", password="secret1")
    # NOTE(review): no return False after this error -- confirm intended
    if status != 1:
        error ("Expected to find status 1 but found: " + str (status))
    # check authentication status
    if vortex.sasl.is_authenticated (conn):
        error ("Expected to not find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.method_used (conn):
        error ("Expected to find none method used but found: " + vortex.sasl.method_used (conn))
        return False
    # check auth id
    if vortex.sasl.auth_id (conn):
        error ("Expected to find none auth id but found something defined: " + vortex.sasl.auth_id (conn))
        return False
    return True
def test_17():
    """Check SASL CRAM-MD5 support: authenticate with valid credentials,
    then verify a wrong password is rejected, checking vortex.sasl state
    accessors in both cases. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now authenticate connection
    if not vortex.sasl.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # do an auth operation using CRAM-MD5 profile
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="cram-md5", auth_id="bob", password="secret")
    # check for VortexOk status (2 == VortexOk)
    if status != 2:
        error ("Expected to find VortexOk status code, but found: " + str (status) + ", error message was: " + message)
        return False
    # check authentication status
    if not vortex.sasl.is_authenticated (conn):
        error ("Expected to find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.CRAM_MD5 != vortex.sasl.method_used (conn):
        error ("Expected to find method used: " + vortex.sasl.CRAM_MD5 + ", but found: " + vortex.sasl.method_used (conn))
        return False
    # check auth id
    if vortex.sasl.auth_id (conn) != "bob":
        error ("Expected to find auth id bob but found: " + vortex.sasl.auth_id (conn))
        return False
    # close connection
    conn.close ()
    # do a SASL CRAM-MD5 try with wrong credentials
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # do an auth operation using CRAM-MD5 profile (wrong password)
    (status, message) = vortex.sasl.start_auth (conn=conn, profile="cram-md5", auth_id="bob", password="secret1")
    # NOTE(review): no return False after this error -- confirm intended
    if status != 1:
        error ("Expected to find status 1 but found: " + str (status))
    # check authentication status
    if vortex.sasl.is_authenticated (conn):
        error ("Expected to not find is authenticated status but found un-authenticated connection")
        return False
    if vortex.sasl.method_used (conn):
        error ("Expected to find none method used but found: " + vortex.sasl.method_used (conn))
        return False
    # check auth id
    if vortex.sasl.auth_id (conn):
        error ("Expected to find none auth id but found something defined: " + vortex.sasl.auth_id (conn))
        return False
    return True
def test_18_common (conn):
    """Exchange 100 MSG/RPY pairs over REGRESSION_URI on the given
    (possibly TLS-secured) connection, checking each reply payload echoes
    the message sent and that no frames remain queued afterwards.
    Returns True on success, False on any mismatch."""
    # now create a channel and send content
    channel = conn.open_channel (0, REGRESSION_URI)
    # flag the channel to do deliveries in a serial form
    channel.set_serialize = True
    # configure frame received: replies are queued for later collection
    queue = vortex.AsyncQueue ()
    channel.set_frame_received (vortex.queue_reply, queue)
    # send 100 frames and receive its replies
    iterator = 0
    while iterator < 100:
        # build message
        message = ";; This buffer is for notes you don't want to save, and for Lisp evaluation.\n\
;; If you want to create a file, visit that file with C-x C-f,\n\
;; then enter the text in that file's own buffer: message num: " + str (iterator)
        # send the message
        channel.send_msg (message, len (message))
        # update iterator
        iterator += 1
    info ("receiving replies..")
    # now receive and process all messages
    iterator = 0
    while iterator < 100:
        # build message to check (must match what was sent above)
        message = ";; This buffer is for notes you don't want to save, and for Lisp evaluation.\n\
;; If you want to create a file, visit that file with C-x C-f,\n\
;; then enter the text in that file's own buffer: message num: " + str (iterator)
        # now get a frame
        frame = channel.get_reply (queue)
        # check content
        if frame.payload != message:
            error ("Expected to find message '" + message + "' but found: '" + frame.payload + "'")
            return False
        # next iterator
        iterator += 1
    # now check there are no pending message in the queue
    if queue.items != 0:
        # BUGFIX: queue.items is an int; concatenating it directly to a str
        # raised TypeError and masked the real failure -- wrap in str()
        error ("Expected to find 0 items in the queue but found: " + str (queue.items))
        return False
    return True
def test_18():
    """Check TLS support: activate TLS synchronously on a fresh connection
    and then run the shared send/receive exchange (test_18_common).
    Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # now enable tls support on the connection
    if not vortex.tls.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # enable TLS on the connection (synchronous: returns the new conn)
    (conn, status, status_msg) = vortex.tls.start_tls (conn)
    # check connection after tls activation
    if not conn.is_ok ():
        error ("Expected to find proper connection status after TLS activation..")
        return False
    # check status
    if status != vortex.status_OK:
        error ("Expected to find status code : " + str (vortex.status_OK) + ", but found: " + str (status))
        # NOTE(review): execution continues after this error (no return
        # False) -- confirm intended
    info ("TLS session activated, sending content..")
    if not test_18_common (conn):
        return False
    return True
def test_19_notify (conn, status, status_msg, queue):
# push a tuple
queue.push ((conn, status, status_msg))
return
def test_19():
    """Check TLS support using the asynchronous notification API: start_tls
    with tls_notify returns None and the result arrives via the queue
    pushed by test_19_notify. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # now enable tls support on the connection
    if not vortex.tls.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # enable TLS on the connection using async notification
    queue = vortex.AsyncQueue ()
    if vortex.tls.start_tls (conn, tls_notify=test_19_notify, tls_notify_data=queue):
        error ("Expected to receive None after async tls activation, but something different was found")
        return False
    # wait for the connection ("statu_msg" is a typo for status_msg; the
    # value is unused so it is harmless)
    (conn, status, statu_msg) = queue.pop ()
    # check connection after tls activation
    if not conn.is_ok ():
        error ("Expected to find proper connection status after TLS activation..")
        return False
    # check status
    if status != vortex.status_OK:
        error ("Expected to find status code : " + str (vortex.status_OK) + ", but found: " + str (status))
        # NOTE(review): execution continues after this error -- confirm
    info ("TLS session activated, sending content..")
    if not test_18_common (conn):
        return False
    return True
def test_20_notify(conn, status, status_msg, queue):
# push status
queue.push ((status, status_msg))
def test_20():
    """Check SASL PLAIN support using the asynchronous notification API.

    Starts the auth operation with auth_notify set; test_20_notify pushes
    (status, status_msg) into the queue which this function pops to
    validate the result. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now authenticate connection
    if not vortex.sasl.init (ctx):
        error ("Expected to find proper authentication initialization, but found an error")
        return False
    # do an auth operation using plain profile (async: must return None)
    queue = vortex.AsyncQueue ()
    if vortex.sasl.start_auth (conn=conn, profile="plain", auth_id="bob", password="secret", auth_notify=test_20_notify, auth_notify_data=queue):
        error ("Expected to find none result but found something different..")
        # BUGFIX: return here; previously execution fell through to a
        # queue.pop () that would block forever since no notification comes
        return False
    # wait for reply pushed by test_20_notify
    (status, status_msg) = queue.pop ()
    # check for VortexOk status (2 == VortexOk)
    if status != 2:
        # BUGFIX: was "message" (undefined in this function, raising a
        # NameError); the async reply carries the text in status_msg
        error ("Expected to find VortexOk status code, but found: " + str (status) + ", error message was: " + status_msg)
        return False
    # check authentication status
    if not vortex.sasl.is_authenticated (conn):
        error ("Expected to find is authenticated status but found un-authenticated connection")
        return False
    if "http://iana.org/beep/SASL/PLAIN" != vortex.sasl.method_used (conn):
        error ("Expected to find method used: http://iana.org/beep/SASL/PLAIN, but found: " + vortex.sasl.method_used (conn))
        return False
    # check auth id
    if vortex.sasl.auth_id (conn) != "bob":
        error ("Expected to find auth id bob but found: " + vortex.sasl.auth_id (conn))
        return False
    # close connection
    conn.close ()
    return True
def test_21_create_channel (conn, channel_num, profile, received, received_data, close, close_data, user_data, next_data):
    """Channel-pool create handler: open the requested channel on the
    given connection and hand it straight back to the pool."""
    # trace which profile the pool asked for
    info ("Called to create channel with profile: " + profile)
    return conn.open_channel (channel_num, profile)
def test_21():
    """Check channel pool support: create pools, repeatedly acquire and
    release channels through the pool object itself, conn.pool() and
    conn.pool(index), verifying channel numbers, pool ids and the count
    of available channels at every step. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    # create channel pool
    pool = conn.channel_pool_new (REGRESSION_URI, 1)
    # check number of channels in the pool
    if pool.channel_count != 1:
        error ("Expected to find channel count equal to 1 but found: " + str (pool.channel_count))
        return False
    # check channel pool id
    if pool.id != 1:
        error ("Expected to find channel pool id equal to 1 but found: " + str (pool.id))
        return False
    info ("Checking to acquire and release channel..")
    iterator = 0
    while iterator < 10:
        # get a channel from the pool
        channel = pool.next_ready ()
        if not channel:
            error ("Expected to find a channel reference available in the pool..but not found")
            return False
        if channel.number != 3:
            error ("Expected to find channel number 3 but found: " + str (channel.number))
            return False
        # check number of channels that are available at this moment
        if pool.channel_available != 0:
            error ("Expected to not find any channel available but found: " + str (pool.channel_available))
            return False
        # ok, now release channel
        pool.release (channel)
        # check number of channels that are available at this moment
        if pool.channel_available != 1:
            error ("Expected to find 1 channel available but found: " + str (pool.channel_available))
            return False
        # next position
        iterator += 1
    info ("Checking to acquire and release channel through conn.pool() method")
    # get a channel from the default pool
    channel = conn.pool().next_ready ()
    if not channel:
        error ("Expected to find a channel reference available in the pool..but not found")
        return False
    if channel.number != 3:
        error ("Expected to find channel number 3 but found: " + str (channel.number))
        return False
    # check number of channels that are available at this moment
    if conn.pool().channel_available != 0:
        error ("Expected to not find any channel available but found: " + str (conn.pool().channel_available))
        return False
    # ok, now release channel
    conn.pool().release (channel)
    # check number of channels that are available at this moment
    if conn.pool().channel_available != 1:
        error ("Expected to find 1 channel available but found: " + str (conn.pool().channel_available))
        return False
    info ("Checking to acquire and release channel through conn.pool(1) method")
    # get a channel from a particular pool
    channel = conn.pool(1).next_ready ()
    if not channel:
        error ("Expected to find a channel reference available in the pool..but not found")
        return False
    if channel.number != 3:
        error ("Expected to find channel number 3 but found: " + str (channel.number))
        return False
    # check number of channels that are available at this moment
    if conn.pool(1).channel_available != 0:
        error ("Expected to not find any channel available but found: " + str (conn.pool(1).channel_available))
        return False
    # ok, now release channel
    conn.pool(1).release (channel)
    # check number of channels that are available at this moment
    if conn.pool(1).channel_available != 1:
        error ("Expected to find 1 channel available but found: " + str (conn.pool(1).channel_available))
        return False
    info ("Creating a new pool (using same variables)")
    # create channel pool (second pool on the same connection, id 2)
    pool = conn.channel_pool_new (REGRESSION_URI, 1,
                                  create_channel=test_21_create_channel, create_channel_data=17)
    # check number of channels in the pool
    if pool.channel_count != 1:
        error ("Expected to find channel count equal to 1 but found: " + str (pool.channel_count))
        return False
    # check channel pool id
    if pool.id != 2:
        error ("Expected to find channel pool id equal to 2 but found: " + str (pool.id))
        return False
    # get a channel from a particular pool
    channel = conn.pool(2).next_ready ()
    if not channel:
        error ("Expected to find a channel reference available in the pool..but not found")
        return False
    if channel.number != 5:
        error ("Expected to find channel number 5 but found: " + str (channel.number))
        return False
    # release channel
    conn.pool(2).release (channel)
    info ("Now checking to access to channels from first pool..");
    # get a channel from a particular pool
    channel = conn.pool(1).next_ready ()
    if not channel:
        error ("Expected to find a channel reference available in the pool..but not found")
        return False
    if channel.number != 3:
        error ("Expected to find channel number 3 but found: " + str (channel.number))
        return False
    # release channel
    conn.pool(1).release (channel)
    info ("Finished release channel from first pool")
    return True
def test_22_create_channel(conn, channel_num, profile, received, received_data, user_data, next_data):
    """Pool create-channel handler used by test_22: validates the create
    beacon passed through user_data, flips it to 21 so the caller can
    verify this handler ran, then opens the requested channel."""
    info ("Called to create channel with profile: " + profile + ", and channel num: " + str (channel_num))
    info ("User data received: " + str (user_data))
    info ("Next data received: " + str (next_data))
    # check beacon
    if user_data[0] != 20:
        error ("Expected to find create beacon equal to 20, but found: " + str (user_data[0]))
        return None
    # update beacon (observed back in test_22 as create_beacon[0] == 21)
    user_data[0] = 21
    return conn.open_channel (channel_num, profile)
def test_22_pool_created (pool, data):
    """Pool on-created handler: sanity-check the pool id and push the pool
    reference into the queue (data) so test_22 can continue."""
    info ("Called pool on created: " + str (pool) + ", with id: " + str (pool.id))
    if pool.id != 1:
        error ("ON HANDLER: Expected to find pool id equal to 1 but found: " + str (pool.id))
    # now push the pool
    data.push (pool)
    info ("Pushed pool created")
    return
def test_22_received (conn, channel, frame, queue):
# push frame received
queue.push (frame)
return
def test_22 ():
    """Check channel pool support with all handlers set (create_channel,
    received, on_created), then exchange one message through a pooled
    channel. Returns True on success."""
    # create a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # connect
    conn = vortex.Connection (ctx, host, port)
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # create channel pool
    info ("Creating channel pool..")
    # NOTE(review): close_beacon appears unused in this test -- confirm
    close_beacon = 10
    create_beacon = [20]
    queue = vortex.AsyncQueue ()
    conn.channel_pool_new (REGRESSION_URI, 1,
                           create_channel=test_22_create_channel, create_channel_data=create_beacon,
                           received=test_22_received, received_data=queue,
                           on_created=test_22_pool_created, user_data=queue)
    info ("Getting channel pool reference..")
    pool = queue.pop ()
    info ("Received pool reference..")
    # check channel pool
    value = pool.id
    if value != 1:
        error ("Expected to find channel pool id equal to 1 but found: " + str (value))
        print pool
        print ("Id found: " + str (pool.id))
        return False
    # now check connection
    if pool.conn.id != conn.id:
        error ("Expected to find connection id: " + str (conn.id) + ", but found: " + str (pool.conn.id))
        return False
    info ("Checking rest of the API..")
    # create_channel handler must have flipped the beacon to 21
    if create_beacon[0] != 21:
        error ("Expected to find value 21 but found: " + str (create_beacon[0]))
        return False
    # now check frame received
    channel = conn.pool().next_ready ()
    if not channel:
        error ("Expected to find channel reference but found None..");
        return False
    # send message
    channel.send_msg ("This is a test..", 16)
    info ("Getting reply result..")
    frame = queue.pop ()
    if not frame:
        error ("Expected to find frame reference but found None..")
        return False
    if frame.payload != "This is a test..":
        error ("Expected to find frame payload content: 'This is a test..' but found: " + frame.payload)
        return False
    return True
def test_23_execute (ctx, queue, count):
    """Periodic event handler: bump the shared counter and, on the 10th
    call, unlock the waiting caller and remove the event (returning True
    removes the event; False keeps it installed)."""
    count[0] += 1
    info ("Count updated (1): " + str (count[0]))
    if count[0] == 10:
        # unlock caller
        queue.push (1)
        # request to finish event
        return True
    return False
def test_23_execute_2 (ctx, queue, count):
    """Periodic event handler variant: finishes after 4 calls (returning
    True removes the event; False keeps it installed)."""
    count[0] += 1
    info ("Count updated (2): " + str (count[0]))
    if count[0] == 4:
        # unlock caller
        queue.push (1)
        # request to finish event
        return True
    return False
def test_23_execute_3 (ctx, queue, count):
    """Periodic event handler variant: finishes after 7 calls (returning
    True removes the event; False keeps it installed)."""
    count[0] += 1
    info ("Count updated (3): " + str (count[0]))
    if count[0] == 7:
        # unlock caller
        queue.push (1)
        # request to finish event
        return True
    return False
def test_23 ():
    """Check event tasks: install periodic events via ctx.new_event
    (periods in microseconds) and wait on an AsyncQueue until every
    handler has removed itself. Returns True on success."""
    # all to register events
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # register event
    info ("Installing event..")
    queue = vortex.AsyncQueue ()
    count = [0]
    ctx.new_event (30000, test_23_execute, queue, count)
    # get value
    info ("Waiting for event to finish")
    queue.pop ()
    # register six events with different periods and handlers
    info ("Installing event 6..")
    count = [0]
    ctx.new_event (30000, test_23_execute, queue, count)
    count2 = [0]
    ctx.new_event (230000, test_23_execute_2, queue, count2)
    count3 = [0]
    ctx.new_event (830000, test_23_execute_3, queue, count3)
    count4 = [0]
    ctx.new_event (1130000, test_23_execute, queue, count4)
    count5 = [0]
    ctx.new_event (1230000, test_23_execute_2, queue, count5)
    count6 = [0]
    ctx.new_event (1830000, test_23_execute_3, queue, count6)
    # get value
    info ("Waiting for events to finish")
    iterator = 0
    while iterator < 6:
        info (" ...one finished")
        queue.pop ()
        # next iterator
        iterator += 1
    return True
def test_24_failure_handler (conn, check_period, unreply_count):
    """Alive-check failure handler: report the failing connection's id
    through the queue that test_24 stored on the connection."""
    # push connection id that failed
    conn.get_data ("test_24_queue").push (conn.id)
    return
def test_24 ():
    """Check the ALIVE implementation: enable the alive check on a
    connection, block the connection so checks go unanswered, and wait
    for test_24_failure_handler to report the failure. Returns True on
    success."""
    # all to register events
    ctx = vortex.Ctx ()
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    info ("Created connection id: " + str (conn.id))
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # print ref count
    info ("Connection ref count: %d" % sys.getrefcount(conn))
    # configure queue (picked up by test_24_failure_handler)
    queue = vortex.AsyncQueue ()
    conn.set_data ("test_24_queue", queue)
    # ok, now enable alive
    if not vortex.alive.enable_check (conn, 20000, 10, test_24_failure_handler):
        error ("Expect to find proper alive.enable_check but found failure..")
        return False
    # print ref count
    info ("Connection ref count: %d" % sys.getrefcount(conn))
    # block connection for a period
    conn.block ()
    # print ref count
    info ("Connection ref count: %d" % sys.getrefcount(conn))
    # check that the connection is blocked
    if not conn.is_blocked ():
        error ("Expected to find blocked connection but different status..")
        return False
    # print ref count
    info ("Connection ref count: %d" % sys.getrefcount(conn))
    # wait until failure happens
    result = queue.pop ()
    info ("Received connection closed id: " + str (result))
    if result != conn.id:
        error ("Expected to find connection id: " + str (conn.id) + ", but found: " + str (result))
        # NOTE(review): no return False here; the test still reports
        # success after this mismatch -- confirm intended
    info ("received connection close..")
    queue.timedpop (200000)
    # print ref count
    info ("Connection ref count: %d" % sys.getrefcount(conn))
    info ("Finshed test..")
    # alive check ok
    return True
def test_25 ():
    """Check sending utf-8 content: echo a payload containing a non-ASCII
    character and verify it round-trips unchanged. Returns True on
    success."""
    # call to initialize a context
    ctx = vortex.Ctx ()
    # call to init ctx
    if not ctx.init ():
        error ("Failed to init Vortex context")
        return False
    # call to create a connection
    conn = vortex.Connection (ctx, host, port)
    # check connection status after if
    if not conn.is_ok ():
        error ("Expected to find proper connection result, but found error. Error code was: " + str(conn.status) + ", message: " + conn.error_msg)
        return False
    # now create a channel
    channel = conn.open_channel (0, REGRESSION_URI)
    if not channel:
        error ("Expected to find proper channel creation, but error found:")
        # get first message
        err = conn.pop_channel_error ()
        while err:
            error ("Found error message: " + str (err[0]) + ": " + err[1])
            # next message
            err = conn.pop_channel_error ()
        return False
    # configure frame received handler
    queue = vortex.AsyncQueue ()
    channel.set_frame_received (vortex.queue_reply, queue)
    # send a message to test (-1 asks the binding to compute the length)
    channel.send_msg ("Camión", -1)
    # wait for the reply
    frame = channel.get_reply (queue)
    # check result
    if frame.payload != "Camión":
        error ("Expected to find content: Camión but found: " + frame.payload)
        return False
    # send utf-8 content ok
    return True
###########################
# intraestructure support #
###########################
def info (msg):
print "[ INFO ] : " + msg
def error (msg):
print "[ ERROR ] : " + msg
def ok (msg):
print "[ OK ] : " + msg
def run_all_tests ():
    """Run every (callable, label) pair in the global tests list in order,
    stopping at the first failure. Returns True when the whole suite
    passes, False otherwise."""
    test_count = 0
    for test in tests:
        # print log
        info ("TEST-" + str(test_count) + ": Running " + test[1])
        # call test
        if not test[0]():
            error ("detected test failure at: " + test[1])
            return False
        # next test
        test_count += 1
    ok ("All tests ok!")
    return True
# declare list of tests available: (callable, description) pairs executed
# in order by run_all_tests ()
tests = [
    (test_00_a, "Check PyVortex async queue wrapper"),
    (test_01,   "Check PyVortex context initialization"),
    (test_02,   "Check PyVortex basic BEEP connection"),
    # (test_02a,  "Check PyVortex log handler configuration"),
    (test_03,   "Check PyVortex basic BEEP connection (shutdown)"),
    (test_03_a, "Check PyVortex connection set data"),
    (test_04,   "Check PyVortex basic BEEP channel creation"),
    (test_05,   "Check BEEP basic data exchange"),
    (test_06,   "Check BEEP check several send operations (serialize)"),
    (test_07,   "Check BEEP check several send operations (one send, one receive)"),
    (test_08,   "Check BEEP transfer zeroed binaries frames"),
    (test_09,   "Check BEEP channel support"),
    (test_10,   "Check BEEP channel creation deny"),
    (test_10_a, "Check BEEP channel creation deny (a)"),
    (test_10_b, "Check reference counting on async notifications"),
    (test_10_c, "Check async channel start notification"),
    (test_10_d, "Check async channel start notification (failure expected)"),
    (test_10_e, "Check channel creation inside a function with frame received"),
    (test_10_f, "Check connection close after sending message"),
    (test_11,   "Check BEEP listener support"),
    (test_12,   "Check connection on close notification"),
    (test_12_a, "Check connection on close notification (during channel start)"),
    (test_12_b, "Check channel start during connection close notify"),
    (test_12_c, "Check close notification for conn refs not owned by caller"),
    (test_12_d, "Check close notification for conn refs at listener"),
    (test_12_e, "Check removing close notification"),
    (test_13,   "Check wrong listener allocation"),
    (test_14,   "Check SASL PLAIN support"),
    (test_15,   "Check SASL ANONYMOUS support"),
    (test_16,   "Check SASL DIGEST-MD5 support"),
    (test_17,   "Check SASL CRAM-MD5 support"),
    (test_18,   "Check TLS support"),
    (test_19,   "Check TLS support (async notification)"),
    (test_20,   "Check SASL PLAIN support (async notification)"),
    (test_21,   "Check channel pool support"),
    (test_22,   "Check channel pool support (handlers)"),
    (test_23,   "Check event tasks"),
    (test_24,   "Check alive implementation"),
    (test_25,   "Check sending utf-8 content")
]
# declare default host and port
host = "localhost"
port = "44010"

if __name__ == '__main__':
    # positional overrides: argv[1] -> host, argv[2] -> port
    if len (sys.argv) > 1:
        host = sys.argv[1]
    if len (sys.argv) > 2:
        port = sys.argv[2]

    # drop a log
    info ("Running tests against " + host + ":" + port)

    # call to run all tests
    run_all_tests ()
|
ASPLes/libvortex-1.1
|
py-vortex/test/vortex-regression-client.py
|
Python
|
lgpl-2.1
| 82,823
|
[
"VisIt"
] |
935de174cf61f96144ddf5b8fd33205db1445e1bc844845fcfac0405c8f96a62
|
#! /usr/bin/env python
# =================================
# = Simple unit tests for pycoda =
# =================================
#
# These will store/use an authentication token in codatests.tok in the current directory.
# This is just a simple test - a very long way from being exhaustive!
#
# Some tests may fail if the organisation's account is in active use, and data on server is changing
# while these tests are being performed. Should be otherwise harmless, though.
#
# This code is released under the GNU General Public License v2.
# See COPYRIGHT.txt and LICENSE.txt.
import unittest
import api
import os, sys, webbrowser, urllib2, time
import random
# Please create new test keys yourself and replace these. See api.py for info.
# You CERTAINLY SHOULD NOT use these for any real application.
# They may be disabled at any time.
TEST_KEY = 'c1361963e1c2475f'
TEST_SECRET = '2cec36b84c7811c2'
TOKEN_FILENAME = 'codatests.tok'
HTTPError = urllib2.HTTPError
# Find a simplejson library somewhere!
try:
    import json # Python 2.6 onwards
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # BUGFIX: this was a bare string expression (a no-op), so the
        # module continued and later failed with a confusing NameError on
        # 'json'. Fail loudly with the intended message instead.
        raise ImportError("Please install the simplejson module or update to a Python version which includes json")
class AuthTestCase(unittest.TestCase):
def setUp(self):
self.codaserver = api.CodaServer(TEST_KEY,TEST_SECRET)
self.atok = None
if os.path.isfile(TOKEN_FILENAME):
tf = open(TOKEN_FILENAME, 'r')
print "\nLoading auth token from %s" % TOKEN_FILENAME
self.atok = json.load(tf)
tf.close()
if not self.atok:
(rtok, url) = self.codaserver.get_auth()
print "Opening web browser to confirm authentication request\nat %s\nPlease approve and then press return here" % url
import webbrowser
webbrowser.open(url)
sys.stdin.readline()
self.atok = self.codaserver.get_access_token(rtok)
tf = open(TOKEN_FILENAME, 'w')
print "Saving auth token to %s" % TOKEN_FILENAME
json.dump(self.atok, tf)
tf.close()
self.coda = self.codaserver.get_coda(self.atok)
def testAuth(self):
# Check an invalid auth
self.assertRaises(api.CodaException, lambda: self.codaserver.get_access_token("oauth_token_secret=randomstring&oauth_token=anotherstring"))
def testGetUser(self):
resp = self.coda.getUser()
self.assertTrue(resp.has_key('user_uuid'))
self.assertTrue(resp.has_key('username'))
def testGetOrganisation(self):
resp = self.coda.getOrganisation()
self.assertTrue(resp.has_key('name'))
self.assertTrue(resp.has_key('organisation_uuid'))
def testUsers(self):
"""List users, create and delete a user"""
orig_users = self.coda.getUsers()
new_user_name = 'test_user_' + str(int(time.time()))
resp = self.coda.createUser(username=new_user_name,
first_name="Test", last_name="User", email="test@example.com",
password="wibble"+str(int(time.time())), permission=3)
new_user_uuid = resp['user_uuid']
resp = self.coda.getUsers(user_uuid=new_user_uuid)
new_users = self.coda.getUsers()
self.assertEqual(len(new_users), len(orig_users)+1, "Count of users didn't change after adding one")
self.coda.removeUser(user_uuid=new_user_uuid)
new_users = self.coda.getUsers()
self.assertEqual(len(new_users), len(orig_users))
def testDisplays(self):
"""List display, and try a simple modification on one"""
displays = self.coda.getDisplays()
# Pick a display at random
disp = random.choice(displays)
uuid = disp['display_uuid']
# Add a new tag - not the most exciting test, I know, but we're using live displays here
tags = disp['tags']
num_tags = len(tags)
new_tag = "test_tag_%d" % random.randint(0, 314156)
self.coda.modifyDisplay(display_uuid = uuid, tags=tags+[new_tag])
# Read the info again and check it's there
new_disp_info = self.coda.getDisplays(display_uuid = uuid)
new_tags = new_disp_info[0]['tags']
self.assertEqual(len(new_tags), num_tags+1)
self.assertTrue(new_tag in new_tags)
# Remove it and check it's gone.
new_tags.remove(new_tag)
self.coda.modifyDisplay(display_uuid = uuid, tags=new_tags)
final_disp_info = self.coda.getDisplays(display_uuid = uuid)
final_tags = new_disp_info[0]['tags']
self.assertEqual(len(final_tags), num_tags)
self.assertTrue(new_tag not in new_tags)
def testSources(self):
    """List sources, create and delete a source"""
    orig_sources = self.coda.getSources()
    src_name = 'test_src_' + str(int(time.time()))
    created = self.coda.createSource(name=src_name,
                                     type_uuid='3c554dfe-f094-5f7e-0013-000000000010',  # an HTML page
                                     parameters=json.dumps({'url': "http://news.google.com"}))
    src_uuid = created['source_uuid']
    # Check we can read both that single UUID...
    found = self.coda.getSources(source_uuid=src_uuid)
    self.assertEqual(src_uuid, found[0]['source_uuid'])
    # and a list containing just that uuid
    found = self.coda.getSources(source_uuids=[src_uuid])
    self.assertEqual(len(found), 1)
    self.assertEqual(src_uuid, found[0]['source_uuid'])
    self.assertEqual(len(self.coda.getSources()), len(orig_sources) + 1,
                     "Count of sources didn't change after adding one")
    # Search for sources with this name; expect exactly one hit.
    by_name = self.coda.getSources(name=src_name)
    self.assertEqual(len(by_name), 1)
    self.assertEqual(src_uuid, by_name[0]['source_uuid'])
    # Search for multiple sources (just one for now!)
    # XXX This may not be live on the server yet!
    by_uuids = self.coda.getSources(source_uuids=[src_uuid])
    self.assertEqual(len(by_uuids), 1)
    self.assertEqual(src_uuid, by_uuids[0]['source_uuid'])
    # The delete it and check it's gone
    self.coda.removeSource(source_uuid=src_uuid)
    self.assertEqual(len(self.coda.getSources()), len(orig_sources))
    self.assertEqual(len(self.coda.getSources(name=src_name)), 0)
# Run the full test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
camvine/pycoda
|
codatests.py
|
Python
|
gpl-2.0
| 6,984
|
[
"exciting"
] |
d53e6a5eb4826034a887ec63b2700d1c5c3952808e4fe3c8c4f5d59e0bd08ab4
|
"""
Define an abstract solver and a greedy, a random, a simulated annealing solvers
considering an uncertainty grid and penalizing large battery consumption.
"""
import numpy as np
import datetime
import copy
import operator
import random
import math
from sys import path
from simanneal import Annealer
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import cm
path.append("../..")
import settings
from solvers.solver import Solver
from solvers.uncertainty_solver import UncertaintySolver
class UncertaintyBatterySolver(UncertaintySolver):
    """
    Define an abstract class for solvers considering an uncertainty grid and
    penalizing large battery consumption.
    """

    def __init__(self, state, mapper, nb_drone, penalizer=None):
        """
        Initialize the abstract solver.

        Keyword arguments:
        state: Initial plan
        mapper: Representation of the environment
        nb_drone: Number of drones
        penalizer: Weight of the battery term in the cost (defaults to
            settings.PENALIZATION_COEFFICIENT)
        """
        UncertaintySolver.__init__(self, state, mapper, nb_drone)
        self.uncertainty_rate = 0
        self.battery_consumption = 0
        # Idiom fix: compare to None with an identity check, not `!= None`.
        if penalizer is not None:
            self.penalizer = penalizer
        else:
            self.penalizer = settings.PENALIZATION_COEFFICIENT

    def compute_performance(self):
        """
        Return (mean uncertainty rate of the points of interest, battery
        consumption) as estimated by the parent class.
        """
        mean, battery = self.estimate_uncertainty_points()
        return mean, battery
class UncertaintyBatteryRandomSolver(UncertaintyBatterySolver):
    """
    Define a random solver.
    """

    def __init__(self, state, mapper, nb_drone, penalizer=None):
        """
        Initialize the random solver.

        Keyword arguments:
        state: Initial plan
        mapper: Representation of the environment
        nb_drone: Number of drones
        penalizer: Weight of the battery term in the cost
        """
        UncertaintyBatterySolver.__init__(self, state, mapper, nb_drone, penalizer)

    def solve(self):
        """
        Shuffle the order of visit for MAX_RANDOM_PLANNER_ITERATION and keep
        the best solution found in self.state.
        """
        self.remove_impossible_targets()
        # NOTE(review): the baseline score is computed after shuffling
        # self.targets while the loop shuffles self.state -- confirm the
        # relationship between the two in the parent class.
        random.shuffle(self.targets)
        best_move = list(self.targets)
        mean, battery = self.compute_performance()
        best_perf = 10000 * mean + self.penalizer * battery
        for i in range(settings.MAX_RANDOM_PLANNER_ITERATION):
            random.shuffle(self.state)
            mean, battery = self.compute_performance()
            perf = 10000 * mean + self.penalizer * battery
            if perf < best_perf:
                # Bug fix: best_perf was never updated, so any plan merely
                # better than the *initial* one overwrote best_move and the
                # loop did not actually track the best plan seen.
                best_perf = perf
                best_move = list(self.state)
        self.state = best_move
class UncertaintyBatterySimulatedAnnealingSolver(Annealer, UncertaintyBatterySolver):
    """
    Define a simulated annealing solver.

    simanneal.Annealer drives the optimization loop; `move` and `energy`
    below implement the interface it requires.
    """
    def __init__(self, state, mapper, nb_drone, nb_change=1, penalizer=None):
        """
        Initialize the simulated annealing solver.

        Keyword arguments:
        state: Initial plan
        mapper: Representation of the environment
        nb_drone: Number of drones
        nb_change: Number of random permutations (see annealing process)
        penalizer: Weight of the battery term in the cost
        """
        UncertaintyBatterySolver.__init__(self, state, mapper, nb_drone, penalizer)
        self.nb_change = nb_change
    def solve(self):
        """
        Launch the annealing process and store the best itinerary and its
        performance figures; returns (state, energy).
        """
        self.remove_impossible_targets()
        itinerary, energy = self.anneal()
        self.state = list(itinerary)
        mean, battery = self.compute_performance()
        self.uncertainty_rate = mean
        self.battery_consumption = battery
        return self.state, energy
    def move(self):
        """
        Define the annealing process (required by the Annealer class):
        swap nb_change random pairs of plan entries.
        """
        for c in range(self.nb_change):
            a = 0
            b = 0
            # Draw until the two indices are distinct.
            while a == b:
                a = random.randint(0, len(self.state) - 1)
                b = random.randint(0, len(self.state) - 1)
            self.state[a], self.state[b] = self.state[b], self.state[a]
    def energy(self):
        """
        Function required by the Annealer class: cost to minimize,
        10000 * mean uncertainty + penalizer * battery consumption.
        """
        mean, battery = self.compute_performance()
        e = mean * 10000 + self.penalizer * battery
        return e
|
OPU-Surveillance-System/monitoring
|
master/scripts/planner/solvers/uncertainty_battery_solver.py
|
Python
|
mit
| 4,288
|
[
"VisIt"
] |
ae26af5e0bc2bdb9e9ea69ba08ebee41ca072fd4322ca244a16edab48ed7609a
|
#!/usr/bin/python
"""
Copyright 2016 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import MySQLdb
import time
import dbShared
def verifySessionDB():
    """Create the tSessions table if it does not already exist."""
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    cursor.execute("show tables like 'tSessions';")
    if cursor.fetchone() is None:
        # Table is missing: create it.
        cursor.execute(
            "CREATE TABLE tSessions (sid VARCHAR(40) NOT NULL PRIMARY KEY, "
            "userID VARCHAR(32) NOT NULL, expires FLOAT, pushKey VARCHAR(255));"
        )
    cursor.close()
    conn.close()
def getSession(sid, duration):
    """Return the userID for a valid session id, or "" if missing/expired.

    Expired sessions are deleted as a side effect. `duration` is currently
    unused but kept for interface compatibility with callers.
    """
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    # Security fix: sid comes from the client, so it must be bound as a
    # query parameter rather than concatenated into the SQL string
    # (SQL injection).
    cursor.execute("SELECT userID, expires FROM tSessions WHERE sid=%s", (sid,))
    row = cursor.fetchone()
    if row == None:
        # no record
        result = ""
    else:
        if time.time() > row[1]:
            # session is expired, delete it
            result = ""
            cursor.execute("DELETE FROM tSessions WHERE sid=%s", (sid,))
        else:
            # good session, return userid
            result = row[0]
    cursor.close()
    conn.close()
    return result
|
druss316/G-Harvestor
|
html/dbSession.py
|
Python
|
gpl-3.0
| 1,782
|
[
"Galaxy"
] |
c6d9722ac7a43154a73136c110d2f7952b724c9ae46c4f70a707ec618c3f1d23
|
"""Infrastructure for RNA-seq supporting files.
"""
import os
from fabric.api import cd
def finalize(genomes, env):
    """Provide symlinks back to reference genomes so tophat avoids generating FASTA genomes.
    """
    genome_dir = os.path.join(env.data_files, "genomes")
    for (orgname, gid, manager) in genomes:
        base_dir = os.path.join(genome_dir, orgname, gid)
        for aligner in ("bowtie", "bowtie2"):
            aligner_dir = os.path.join(base_dir, aligner)
            if not env.safe_exists(aligner_dir):
                continue
            with cd(aligner_dir):
                for ext in ("", ".fai"):
                    src_seq = os.path.join(os.pardir, "seq", "%s.fa%s" % (gid, ext))
                    link_name = os.path.basename(src_seq)
                    # Only link when the source exists and no link is present yet.
                    if env.safe_exists(src_seq) and not env.safe_exists(link_name):
                        env.safe_run("ln -sf %s" % src_seq)
def cleanup(genomes, env):
    """Cleanup for GGD recipe installation, removing old rnaseq symlinks.
    """
    genome_dir = os.path.join(env.data_files, "genomes")
    for (orgname, gid, manager) in genomes:
        link_path = os.path.join(genome_dir, orgname, gid, "rnaseq")
        # Only act locally; remote hosts are left untouched.
        if env.hosts != ["localhost"]:
            continue
        if os.path.lexists(link_path) and os.path.islink(link_path):
            os.remove(link_path)
|
joemphilips/cloudbiolinux
|
cloudbio/biodata/rnaseq.py
|
Python
|
mit
| 1,372
|
[
"Bowtie"
] |
e25bf5c5d4ca9e0f651cf89011ecaeba512493f98ddd1ad26946ebd4d50835bd
|
from ovito import *
from ovito.io import *
from ovito.modifiers import *
from ovito.data import *
import numpy
# Load the test structure and attach a user-defined Python script modifier.
node = import_file("../../files/CFG/shear.void.120.cfg")
modifier = PythonScriptModifier()
node.modifiers.append(modifier)
def compute_coordination(pindex, finder):
    """Count the neighbors that *finder* yields for particle *pindex*."""
    count = 0
    for _neighbor in finder.find(pindex):
        count += 1
    return count
def modify(frame, input, output):
    """Modifier function: color all particles and compute a coordination count.

    Implemented as a generator -- yielded strings/fractions report status and
    progress back to OVITO.
    """
    yield "Hello world"
    color_property = output.create_particle_property(ParticleProperty.Type.Color)
    # Set every particle to the same RGB color.
    color_property.marray[:] = (0,0.5,0)
    my_property = output.create_user_particle_property("MyCoordination", "int")
    finder = CutoffNeighborFinder(3.5, input)
    for index in range(input.number_of_particles):
        # Report progress every 100 particles.
        if index % 100 == 0: yield index/input.number_of_particles
        my_property.marray[index] = compute_coordination(index, finder)
# Install the modifier function, evaluate the pipeline and verify both the
# assigned color and the computed coordination property.
modifier.function = modify
node.compute()
assert((node.output.color.array[0] == numpy.array([0,0.5,0])).all())
assert(node.output["MyCoordination"].array[0] > 0)
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/python_script_modifier.py
|
Python
|
gpl-3.0
| 990
|
[
"OVITO"
] |
d4b01474e84ce947ded462aee48f5811ccbbde47340602cb861646fed3459822
|
# Packaging script for the simplekml library.
# NOTE(review): distutils is deprecated and removed in Python 3.12; migrating
# to setuptools is the usual fix -- confirm before upgrading the toolchain.
from distutils.core import setup
setup(
name = 'simplekml',
packages = ['simplekml'],
version = '1.2.3',
description = 'A Simple KML creator',
author='Kyle Lancaster',
author_email='kyle.lan@gmail.com',
url='http://code.google.com/p/simplekml/',
license='GNU General Public License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Software Development :: Libraries :: Python Modules'
],
long_description="""
simplekml is a python package which enables you to generate KML with as little effort as possible.
At the time of making this package nothing was available (at least I could not find anything) that could create KML files easily. You needed a lot of bloated code to even create a simple point. This is understandable because the KML standard is quite extensive, but what if you just work with the simple elements of KML like Document, Folder, Point, LineString and Polygon? This package supports those elements and everything documented in the KML Reference. With simplekml creating a KML file containing a point as simple as::
import simplekml
kml = simplekml.Kml()
kml.newpoint(name="Kirstenbosch", coords=[(18.432314,-33.988862)])
kml.save("botanicalgarden.kml")
See the Documentation_ for usage and reference or visit the Homepage_ for more information.
.. _Documentation: http://simplekml.readthedocs.org
.. _Homepage: http://code.google.com/p/simplekml/
"""
)
|
Sakartu/simplekml
|
setup.py
|
Python
|
gpl-3.0
| 2,132
|
[
"VisIt"
] |
075ddb8b6fa13515ca4d77b82fc758b8e7e4bf2ca3b1aba7512cc259a7b0d4eb
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import warnings
import six
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
import numpy.core.umath_tests # pylint: disable=unused-import
import skopt
from guild import batch_util
from guild import flag_util
from guild import op_util
from guild import query as qparse
log = logging.getLogger("guild")
DEFAULT_MAX_TRIALS = 20
DEFAULT_OBJECTIVE = "loss"
###################################################################
# Exceptions
###################################################################
class MissingSearchDimension(Exception):
    """Raised when a set of flag values defines no searchable dimension."""

    def __init__(self, flag_vals):
        super(MissingSearchDimension, self).__init__(flag_vals)
        # Keep the offending flag values so callers can report them.
        self.flag_vals = flag_vals
class InvalidSearchDimension(Exception):
    """Raised when a flag search dimension spec is malformed."""
    pass
class InvalidObjective(Exception):
    """Raised when the batch objective spec cannot be parsed or is ambiguous."""
    pass
###################################################################
# Random trials
###################################################################
def random_trials_for_flags(flag_vals, count, random_seed=None):
    """Return *count* randomly sampled trials over the searchable flags.

    Non-searchable flag values are copied unchanged into every trial.
    Raises MissingSearchDimension when no flag defines a search space.
    """
    names, dims, initial_x = flag_dims(flag_vals)
    if not names:
        raise MissingSearchDimension(flag_vals)
    trials = _trials_for_dims(names, dims, initial_x, count, random_seed)
    _apply_missing_flag_vals(flag_vals, trials)
    return trials
def _trials_for_dims(names, dims, initial_x, num_trials, random_seed):
    """Sample *num_trials* points from *dims* as a list of flag dicts."""
    # dummy_minimize with a constant objective is used purely as a random
    # sampler over the search space.
    res = skopt.dummy_minimize(
        lambda *args: 0, dims, n_calls=num_trials, random_state=random_seed
    )
    trials_xs = res.x_iters
    if trials_xs:
        # Pin any user-provided initial values into the first trial.
        _apply_initial_x(initial_x, trials_xs[0])
    return [dict(zip(names, native_python_xs(xs))) for xs in trials_xs]
def native_python_xs(xs):
    """Return *xs* with numpy scalar values converted to native Python.

    Values without an ``item()`` method are passed through unchanged.
    """
    converted = []
    for value in xs:
        try:
            converted.append(value.item())
        except AttributeError:
            converted.append(value)
    return converted
def _apply_initial_x(initial_x, target_x):
assert len(initial_x) == len(target_x)
for i, x in enumerate(initial_x):
if x is not None:
target_x[i] = x
def _apply_missing_flag_vals(flag_vals, trials):
for trial in trials:
trial.update({name: flag_vals[name] for name in flag_vals if name not in trial})
###################################################################
# Flag dims
###################################################################
def flag_dims(flags):
    """Return flag names, dims, and initials for flags.

    Only flag values that correspond to searchable dimensions are
    returned. Scalars and non-function string values are not included
    in the result. Results are ordered by flag name.
    """
    dims = {}
    initials = {}
    for name, val in flags.items():
        try:
            flag_dim, initial = _flag_dim(val, name)
        except ValueError:
            # Not a searchable value -- skip it.
            continue
        dims[name] = flag_dim
        initials[name] = initial
    names = sorted(dims)
    return (names, [dims[n] for n in names], [initials[n] for n in names])
def _flag_dim(val, flag_name):
    """Map a flag value to (dimension, initial); raise ValueError otherwise."""
    if isinstance(val, list):
        # A list of values is a categorical search space.
        return _categorical_dim(val, None)
    if isinstance(val, six.string_types):
        # Strings may encode a flag function like "uniform[1:10]".
        return _try_function_dim(val, flag_name)
    raise ValueError(val, flag_name)
def _categorical_dim(vals, initial):
    """Wrap a list of values as a skopt Categorical dimension."""
    # Imported lazily to keep module import cheap when unused.
    from skopt.space import space
    return space.Categorical(vals), initial
def _try_function_dim(val, flag_name):
    """Decode a flag-function string into a search dimension.

    Raises ValueError when *val* does not encode a flag function.
    """
    assert isinstance(val, six.string_types), val
    try:
        func_name, func_args = flag_util.decode_flag_function(val)
    except ValueError:
        # Re-raise with the flag context for the caller.
        raise ValueError(val, flag_name)
    else:
        return function_dim(func_name, func_args, flag_name)
def function_dim(func_name, args, flag_name):
    """Resolve a named flag function ("uniform"/"loguniform") to a dimension.

    An unnamed function defaults to uniform. Raises InvalidSearchDimension
    for unknown function names.
    """
    if func_name is None:
        func_name = "uniform"
    if func_name == "uniform":
        return _uniform_dim(args, func_name, flag_name)
    if func_name == "loguniform":
        return _real_dim(args, "log-uniform", func_name, flag_name)
    raise InvalidSearchDimension(
        "unknown function '%s' used for flag %s" % (func_name, flag_name)
    )
def _uniform_dim(args, func_name, flag_name):
    """Build a uniform dimension; skopt infers Integer/Real from the args."""
    from skopt.space import space
    dim_args, initial = _dim_args_and_initial(args, func_name, flag_name)
    return space.check_dimension(dim_args), initial
def _real_dim(args, prior, func_name, flag_name):
    """Build a Real dimension with the given *prior* (e.g. "log-uniform")."""
    from skopt.space import space
    dim_args, initial = _dim_args_and_initial(args, func_name, flag_name)
    # space.Real(low, high, prior)
    real_init_args = list(dim_args) + [prior]
    return space.Real(*real_init_args), initial
def _dim_args_and_initial(args, func_name, flag_name):
if len(args) == 2:
return args, None
elif len(args) == 3:
return args[:2], args[2]
else:
raise InvalidSearchDimension(
"%s requires 2 or 3 args, got %s for flag %s" % (func_name, args, flag_name)
)
###################################################################
# Sequential trials handler
###################################################################
def handle_seq_trials(batch_run, suggest_x_cb):
    """Entry point for a sequential-optimizer batch run.

    Trial printing/saving modes are rejected because sequential trials are
    generated one at a time from previous results and cannot be listed up
    front. Known errors are converted to user-facing messages + SystemExit.
    """
    if os.getenv("PRINT_TRIALS_CMD") == "1":
        _print_trials_cmd_not_supported_error()
    elif os.getenv("PRINT_TRIALS") == "1":
        _print_trials_not_supported_error()
    elif os.getenv("SAVE_TRIALS"):
        _save_trials_not_supported_error()
    else:
        try:
            _run_seq_trials(batch_run, suggest_x_cb)
        except MissingSearchDimension as e:
            missing_search_dim_error(e.flag_vals)
        except InvalidObjective as e:
            _handle_general_error(e)
def _run_seq_trials(batch_run, suggest_x_cb):
    """Run up to max_trials trials, each suggested from prior trial results."""
    proto_flag_vals = batch_run.batch_proto.get("flags")
    batch_flag_vals = batch_run.get("flags")
    max_trials = batch_run.get("max_trials") or DEFAULT_MAX_TRIALS
    random_state = batch_run.get("random_seed")
    # Random starts cannot exceed the overall trial budget.
    random_starts = min(batch_flag_vals.get("random-starts") or 0, max_trials)
    objective_scalar, objective_negate = _objective_y_info(batch_run)
    prev_trials_model = (
        batch_flag_vals.get("prev-trials") or batch_util.PREV_TRIALS_BATCH
    )
    # Re-evaluated before every trial so each suggestion sees the latest
    # completed runs.
    prev_trials_cb = lambda: batch_util.trial_results(
        batch_run, [objective_scalar], prev_trials_model
    )
    trials_count = 0
    for trial_flag_vals, is_trial_random_start, prev_trials, x0 in _iter_seq_trials(
        proto_flag_vals,
        objective_negate,
        max_trials,
        random_state,
        random_starts,
        prev_trials_cb,
        suggest_x_cb,
        batch_flag_vals,
    ):
        _log_seq_trial(
            is_trial_random_start,
            random_starts,
            trials_count,
            x0,
            prev_trials,
            objective_scalar,
        )
        trial_run = batch_util.init_trial_run(batch_run, trial_flag_vals)
        try:
            batch_util.start_trial_run(trial_run)
        except SystemExit as e:
            # A failing trial does not abort the batch.
            batch_util.handle_trial_system_exit(e, batch_run, trial_run)
        else:
            trials_count += 1
def _iter_seq_trials(
    proto_flag_vals,
    objective_negate,
    max_trials,
    random_state,
    random_starts,
    prev_trials_cb,
    suggest_x_cb,
    suggest_x_opts,
):
    """Yield (trial_flag_vals, is_random_start, prev_trials, x0) per trial.

    (x0, y0) training data is rebuilt from previous trials before each
    suggestion; random_state is threaded through suggestions so the
    sequence is reproducible for a given seed.
    """
    names, dims, initial_x = _flag_dims_for_search(proto_flag_vals)
    runs_count = 0
    for _ in range(max_trials):
        prev_trials = prev_trials_cb()
        x0, y0 = _trials_xy_for_prev_trials(prev_trials, names, objective_negate)
        is_random_start = _is_random_start(x0, runs_count, random_starts)
        suggested_x, random_state = _suggest_x(
            suggest_x_cb,
            dims,
            x0,
            y0,
            is_random_start,
            random_state,
            suggest_x_opts,
        )
        # Apply any user-specified initial flag values to the first trial.
        if runs_count == 0 and suggested_x:
            _apply_initial_x(initial_x, suggested_x)
        trial_flag_vals = _trial_flags_for_x(suggested_x, names, proto_flag_vals)
        yield trial_flag_vals, is_random_start, prev_trials, x0
        runs_count += 1
def _flag_dims_for_search(proto_flag_vals):
    """Like flag_dims but raise MissingSearchDimension when nothing is searchable."""
    names, dims, initial_x = flag_dims(proto_flag_vals)
    if names:
        return names, dims, initial_x
    raise MissingSearchDimension(proto_flag_vals)
def _objective_y_info(batch_run):
    """Return ((prefix, key, qualifier), negate) for the batch objective.

    A leading "-" on the spec means maximize: scalars are negated (negate=-1)
    so the optimizer can always minimize. Raises InvalidObjective when the
    spec cannot be parsed or names more than one column.
    """
    objective_spec = batch_run.get("objective") or DEFAULT_OBJECTIVE
    if objective_spec[0] == "-":
        objective_spec = objective_spec[1:]
        y_negate = -1
    else:
        y_negate = 1
    try:
        colspec = qparse.parse_colspec(objective_spec)
    except qparse.ParseError as e:
        raise InvalidObjective("invalid objective %r: %s" % (objective_spec, e))
    else:
        if len(colspec.cols) > 1:
            raise InvalidObjective(
                "invalid objective %r: too many columns" % objective_spec
            )
        col = colspec.cols[0]
        prefix, key = col.split_key()
        y_scalar_col = (prefix, key, col.qualifier)
        return y_scalar_col, y_negate
def _trials_xy_for_prev_trials(prev_trials, names, objective_negate):
assert names
x0 = []
y0 = []
for flags, y_scalars in prev_trials:
assert len(y_scalars) == 1
y = y_scalars[0]
if y is None:
continue
x0.append([flags.get(name) for name in names])
y0.append(objective_negate * y)
if not x0:
return None, None
return x0, y0
def _is_random_start(x0, runs_count, wanted_random_starts):
return x0 is None or runs_count < wanted_random_starts
def _log_seq_trial(
    is_random_start, random_starts, runs_count, x0, prev_trials, objective
):
    """Logs whether trial is random or based on previous trials.

    is_random_start is the authoritative flag that indicates
    whether or not a random trial is used. The remaining args are used
    only to infer the explanation shown to the user.
    """
    if is_random_start:
        explanation = _random_start_explanation(
            random_starts, runs_count, x0, prev_trials, objective
        )
        log.info("Random start for optimization (%s)", explanation)
    else:
        log.info("Found %i previous trial(s) for use in optimization", len(prev_trials))
def _random_start_explanation(random_starts, runs_count, x0, prev_trials, objective):
if runs_count < random_starts:
return "%s of %s" % (runs_count + 1, random_starts)
elif not prev_trials:
return "missing previous trials"
elif not x0:
return "cannot find objective '%s'" % _format_objective(objective)
else:
assert False, (random_starts, runs_count, x0, prev_trials, objective)
def _format_objective(objective):
prefix, tag, _qual = objective
if not prefix:
return tag
return "%s#%s" % (prefix, tag)
def _suggest_x(suggest_x_cb, dims, x0, y0, is_random_start, random_state, suggest_opts):
    """Delegate the next-point suggestion to the optimizer callback.

    Returns whatever the callback returns -- per callers, a
    (suggested_x, new_random_state) pair.
    """
    log.debug(
        "suggestion inputs: dims=%s x0=%s y0=%s "
        "random_start=%s random_state=%s opts=%s",
        dims,
        x0,
        y0,
        is_random_start,
        random_state,
        suggest_opts,
    )
    return suggest_x_cb(dims, x0, y0, is_random_start, random_state, suggest_opts)
def _trial_flags_for_x(x, names, proto_flag_vals):
    """Merge suggested values *x* (paired with *names*) over the proto flags."""
    merged = dict(proto_flag_vals)
    for name, val in zip(names, native_python_xs(x)):
        merged[name] = val
    return merged
###################################################################
# Sequential trials ipy support
###################################################################
def ipy_gen_trials(
    proto_flag_vals,
    prev_results_cb,
    suggest_x_cb,
    max_trials=None,
    random_seed=None,
    random_starts=None,
    minimize=None,
    maximize=None,
    suggest_x_opts=None,
    **_kw
):
    """Interactive (guild.ipy) variant of the sequential trial generator.

    Yields flag values for each suggested trial rather than starting runs.
    minimize/maximize are mutually exclusive objective specs.
    """
    objective_scalar, objective_negate = _ipy_objective(minimize, maximize)
    prev_trials_cb = _ipy_prev_trials_cb(prev_results_cb, objective_scalar)
    trials_count = 0
    for trial_flag_vals, is_trial_random_start, prev_trials, x0 in _iter_seq_trials(
        proto_flag_vals,
        objective_negate,
        max_trials,
        random_seed,
        random_starts,
        prev_trials_cb,
        suggest_x_cb,
        suggest_x_opts,
    ):
        _log_seq_trial(
            is_trial_random_start,
            random_starts,
            trials_count,
            x0,
            prev_trials,
            objective_scalar,
        )
        yield trial_flag_vals
        trials_count += 1
def _ipy_objective(minimize, maximize):
    """Return ((prefix, tag, qualifier), negate) from minimize/maximize specs.

    Raises ValueError for unparsable or multi-column objectives.
    """
    colspec, negate = _ipy_objective_colspec(minimize, maximize)
    try:
        cols = qparse.parse_colspec(colspec).cols
    except qparse.ParseError as e:
        raise ValueError("cannot parse objective %r: %s" % (colspec, e))
    else:
        if len(cols) > 1:
            raise ValueError(
                "invalid objective %r: only one column may " "be specified" % colspec
            )
        scalar = cols[0]
        prefix, tag = scalar.split_key()
        return (prefix, tag, scalar.qualifier), negate
def _ipy_objective_colspec(minimize, maximize):
if minimize and maximize:
raise ValueError("minimize and maximize cannot both be specified")
if not minimize and not maximize:
return DEFAULT_OBJECTIVE, 1
if minimize:
return minimize, 1
assert maximize
return maximize, -1
def _ipy_prev_trials_cb(prev_results_cb, objective_scalar):
    """Adapt an ipy results callback to the trial-results shape used here."""
    def f():
        runs, _results = prev_results_cb()
        return batch_util.trial_results_for_runs(runs, [objective_scalar])
    return f
###################################################################
# Error handlers
###################################################################
def missing_search_dim_error(flag_vals):
    """Log a user-facing message for missing search dimensions and exit."""
    log.error(
        "flags for batch (%s) do not contain any search dimensions\n"
        "Try specifying a range for one or more flags as NAME=[MIN:MAX].",
        op_util.flags_desc(flag_vals),
    )
    raise SystemExit(1)
def _print_trials_cmd_not_supported_error():
    """Log that printing the trials command is unsupported and exit."""
    log.error("optimizer does not support printing trials command")
    raise SystemExit(1)
def _print_trials_not_supported_error():
    """Log that printing trials is unsupported and exit."""
    log.error("optimizer does not support printing trials")
    raise SystemExit(1)
def _save_trials_not_supported_error():
    """Log that saving trials is unsupported and exit."""
    log.error("optimizer does not support saving trials")
    raise SystemExit(1)
def _handle_general_error(e):
    """Log the error message and exit with a non-zero status."""
    log.error(e)
    raise SystemExit(1)
###################################################################
# Patched functions
###################################################################
def patched_gp_minimize(
    func,
    dimensions,
    base_estimator=None,
    n_calls=100,
    n_random_starts=None,
    acq_func="gp_hedge",
    acq_optimizer="auto",
    x0=None,
    y0=None,
    random_state=None,
    verbose=False,
    callback=None,
    n_points=10000,
    n_restarts_optimizer=5,
    xi=0.01,
    kappa=1.96,
    noise="gaussian",
    n_jobs=1,
    model_queue_size=None,
):
    """Patched version of skopt.gp_minimize.

    If `base_estimator` is not specified, provides a default estimator
    for GP that is non-normalizing for values of y. This works around
    these issues:

    - https://github.com/guildai/guildai/issues/218
    - https://github.com/scikit-optimize/scikit-optimize/issues/947
    - https://github.com/scikit-learn/scikit-learn/pull/18388
    - https://github.com/scikit-learn/scikit-learn/issues/18318
    """
    # Only the default estimator is patched; an explicit base_estimator is
    # passed through unchanged.
    if base_estimator is None:
        base_estimator = _patched_gp_base_estimator(dimensions, random_state, noise)
    return skopt.gp_minimize(
        func,
        dimensions,
        base_estimator=base_estimator,
        n_calls=n_calls,
        n_random_starts=n_random_starts,
        acq_func=acq_func,
        acq_optimizer=acq_optimizer,
        x0=x0,
        y0=y0,
        random_state=random_state,
        verbose=verbose,
        callback=callback,
        n_points=n_points,
        n_restarts_optimizer=n_restarts_optimizer,
        xi=xi,
        kappa=kappa,
        noise=noise,
        n_jobs=n_jobs,
        model_queue_size=model_queue_size,
    )
def _patched_gp_base_estimator(dimensions, random_state, noise):
    """Return a GP estimator with y-normalization disabled."""
    import numpy as np
    from sklearn.utils import check_random_state
    from skopt.utils import normalize_dimensions
    # Mirrors how skopt itself cooks the default GP estimator.
    space = normalize_dimensions(dimensions)
    rng = check_random_state(random_state)
    estimator = skopt.utils.cook_estimator(
        "GP",
        space=space,
        random_state=rng.randint(0, np.iinfo(np.int32).max),
        noise=noise,
    )
    # The point of this function - setting normalize_y to False.
    estimator.normalize_y = False
    return estimator
|
guildai/guild
|
guild/plugins/skopt_util.py
|
Python
|
apache-2.0
| 17,278
|
[
"Gaussian"
] |
39d3795216a962778dcb5e57579d807d8f3aeb6bc1f02b7567080740eb500ecb
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
import scipy.ndimage as nd
from scipy.interpolate import InterpolatedUnivariateSpline
from astropy.convolution import Gaussian2DKernel, convolve_fft
from astropy.wcs import WCS
import astropy.units as u
from warnings import warn
from ..stats_utils import standardize, common_scale
from ..base_statistic import BaseStatisticMixIn
from ...io import common_types, twod_types, input_data, find_beam_properties
class Genus(BaseStatisticMixIn):
"""
Genus Statistics based off of Chepurnov et al. (2008).
Parameters
----------
img : %(dtypes)s
2D image.
min_value : `~astropy.units.Quantity` or float, optional
Minimum value in the data to consider. If None, the minimum is used.
When `img` has an attached brightness unit, `min_value` must have the
same units.
max_value : `~astropy.units.Quantity` or float, optional
Maximum value in the data to consider. If None, the maximum is used.
When `img` has an attached brightness unit, `min_value` must have the
same units.
lowdens_percent : float, optional
Lower percentile of the data to use. Defaults to the minimum value.
Overrides `min_value` when the value of this percentile is greater
than `min_value`.
highdens_percent : float, optional
Upper percentile of the data to use. Defaults to the maximum value.
Overrides `max_value` when the value of this percentile is lower than
`max_value`.
numpts : int, optional
Number of thresholds to calculate statistic at.
smoothing_radii : np.ndarray or `astropy.units.Quantity`, optional
Kernel radii to smooth data to. If units are not attached, the radii
are assumed to be in pixels. If no radii are given, 5 smoothing radii
will be used ranging from 1 pixel to one-tenth the smallest dimension
size.
distance : `~astropy.units.Quantity`, optional
Physical distance to the region in the data.
Examples
--------
>>> from turbustat.statistics import Genus
>>> from astropy.io import fits
>>> import astropy.units as u
>>> import numpy as np
>>> moment0 = fits.open("Design4_21_0_0_flatrho_0021_13co.moment0.fits")[0] # doctest: +SKIP
>>> genus = Genus(moment0, lowdens_percent=15, highdens_percent=85) # doctest: +SKIP
>>> genus.run() # doctest: +SKIP
"""
__doc__ %= {"dtypes": " or ".join(common_types + twod_types)}
def __init__(self, img, min_value=None, max_value=None, lowdens_percent=0,
             highdens_percent=100, numpts=100, smoothing_radii=None,
             distance=None):
    """Set up thresholds and smoothing radii; see the class docstring."""
    super(Genus, self).__init__()
    # Accept bare arrays (no header) or FITS-like inputs with a header.
    if isinstance(img, np.ndarray):
        self.need_header_flag = False
        self.data = input_data(img, no_header=True)
        self.header = None
    else:
        self.need_header_flag = True
        self.data, self.header = input_data(img, no_header=False)
    if distance is not None:
        self.distance = distance
    # Resolve min/max values, validating units against the data when present.
    if min_value is None:
        min_value = np.nanmin(self.data)
    else:
        if hasattr(self.data, 'unit'):
            if not hasattr(min_value, 'unit'):
                raise TypeError("data has units of {}. 'min_value' must "
                                "have equivalent units."
                                .format(self.data.unit))
            if not min_value.unit.is_equivalent(self.data.unit):
                raise u.UnitsError("min_value does not have an equivalent "
                                   "units to the img unit.")
            min_value = min_value.to(self.data.unit)
    if max_value is None:
        max_value = np.nanmax(self.data)
    else:
        if hasattr(self.data, 'unit'):
            if not hasattr(max_value, 'unit'):
                raise TypeError("data has units of {}. 'max_value' must "
                                "have equivalent units."
                                .format(self.data.unit))
            if not max_value.unit.is_equivalent(self.data.unit):
                raise u.UnitsError("max_value does not have an equivalent "
                                   "units to the img unit.")
            max_value = max_value.to(self.data.unit)
    min_percent = \
        np.percentile(self.data[~np.isnan(self.data)],
                      lowdens_percent)
    max_percent = \
        np.percentile(self.data[~np.isnan(self.data)],
                      highdens_percent)
    # The percentile bounds tighten the value range: the lower percentile
    # overrides min_value when *greater*, the upper when *lower*.
    if min_value is None or min_percent > min_value:
        min_value = min_percent
    # Bug fix: this previously used `>`, which made highdens_percent a
    # no-op (the docstring states it overrides max_value when the
    # percentile is lower than max_value).
    if max_value is None or max_percent < max_value:
        max_value = max_percent
    self._thresholds = np.linspace(min_value, max_value, numpts)
    if smoothing_radii is None:
        self.smoothing_radii = np.array([1.0])
    else:
        if isinstance(smoothing_radii, u.Quantity):
            # Angular/physical radii are converted to pixels.
            self.smoothing_radii = self._to_pixel(smoothing_radii).value
        else:
            self.smoothing_radii = smoothing_radii
@property
def thresholds(self):
    '''
    Values of the data to compute the Genus statistics at.

    Set in ``__init__`` via ``np.linspace`` between the resolved min and
    max values.
    '''
    return self._thresholds
@property
def smoothing_radii(self):
    '''
    Pixel radii used to smooth the data.
    '''
    return self._smoothing_radii
@smoothing_radii.setter
def smoothing_radii(self, values):
    # Radii are in pixels: sub-pixel kernels are meaningless and kernels
    # larger than half the image cannot be applied sensibly.
    if np.any(values < 1.0):
        raise ValueError("All smoothing radii must be larger than one"
                         " pixel.")
    if np.any(values > 0.5 * min(self.data.shape)):
        raise ValueError("All smoothing radii must be smaller than half of"
                         " the image shape.")
    self._smoothing_radii = values
@property
def smoothed_images(self):
    '''
    List of smoothed versions of the image, using the radii in
    `~Genus.smoothing_radii`.

    Only populated when `keep_smoothed_images=True` is passed to
    `~Genus.make_genus_curve`.
    '''
    if not hasattr(self, '_smoothed_images'):
        raise ValueError("Set `keep_smoothed_images=True` in "
                         "Genus.make_genus_curve")
    return self._smoothed_images
def make_genus_curve(self, use_beam=False, min_size=4,
                     connectivity=1, keep_smoothed_images=False,
                     match_kernel=False,
                     **convolution_kwargs):
    '''
    Smooth the data with a Gaussian kernel and compute the genus curve
    (number of high-density regions minus number of low-density regions)
    at each of `~Genus.thresholds`, for each of `~Genus.smoothing_radii`.

    Parameters
    ----------
    use_beam : bool, optional
        When enabled, will use the given `beam_fwhm` or try to load it from
        the header. When disabled, the minimum size is set by `min_size`.
    min_size : int or `~astropy.units.Quantity`, optional
        Directly specify the minimum
        area a region must have to be counted. Integer values with no units
        are assumed to be in pixels.
    connectivity : {1, 2}, optional
        Connectivity used when removing regions below min_size.
    keep_smoothed_images : bool, optional
        Keep the convolved images in the `~Genus.smoothed_images` list.
        Default is `False`.
    match_kernel : bool, optional
        Match kernel shape to the data shape when convolving. Default is
        `False`. Enable to reproduce behaviour of `~Genus` prior to
        version 1.0 of TurbuStat.
    convolution_kwargs: Passed to `~astropy.convolve.convolve_fft`.
    '''
    if keep_smoothed_images:
        self._smoothed_images = []

    if use_beam:
        major, minor = find_beam_properties(self.header)[:2]
        major = self._to_pixel(major)
        minor = self._to_pixel(minor)
        # the area of a Gaussian beam is 2 pi sigma^2, and major/minor are FWHMs
        # NOTE(review): a strict 2*pi*sigma_maj*sigma_min would divide by
        # 8*ln(2), not sqrt(8*ln(2)) — confirm intended normalization.
        pix_area = 2 * np.pi * major * minor / np.sqrt(8*np.log(2))
        min_size = int(np.floor(pix_area.value))
    else:
        if isinstance(min_size, u.Quantity):
            # Convert angular area to a pixel count.
            min_size = self._to_pixel_area(min_size)
            min_size = int(np.floor(min_size.value))
        else:
            min_size = int(min_size)

    # One genus value per (smoothing radius, threshold) pair.
    self._genus_stats = np.empty((len(self.smoothing_radii),
                                  len(self.thresholds)))

    for j, width in enumerate(self.smoothing_radii):
        if match_kernel:
            # Pre-1.0 behaviour: kernel spans the full image.
            kernel = Gaussian2DKernel(width, x_size=self.data.shape[0],
                                      y_size=self.data.shape[1])
        else:
            kernel = Gaussian2DKernel(width)
        smooth_img = convolve_fft(self.data, kernel, **convolution_kwargs)

        if keep_smoothed_images:
            # BUG FIX: previously appended to 'self._keep_smoothed_images',
            # which is never created (the list made above is
            # '_smoothed_images'), so keep_smoothed_images=True always
            # raised AttributeError.
            self._smoothed_images.append(smooth_img)

        for i, thresh in enumerate(self.thresholds):
            # Masks of regions above/below the threshold, with regions
            # smaller than min_size discarded.
            high_density = remove_small_objects(smooth_img > thresh,
                                                min_size=min_size,
                                                connectivity=connectivity)
            low_density = remove_small_objects(smooth_img < thresh,
                                               min_size=min_size,
                                               connectivity=connectivity)
            # eight-connectivity to count the regions
            high_density_labels, high_density_num = \
                nd.label(high_density, np.ones((3, 3)))
            low_density_labels, low_density_num = \
                nd.label(low_density, np.ones((3, 3)))

            self._genus_stats[j, i] = high_density_num - low_density_num
@property
def genus_stats(self):
    '''
    2D array of genus values: smoothing radii along axis 0 and
    threshold values along axis 1.
    '''
    return self._genus_stats
def plot_fit(self, save_name=None, color='r', symbol='o'):
    '''
    Plot the Genus curves, one panel per smoothing radius.

    Parameters
    ----------
    save_name : str,optional
        Save the figure when a file name is given.
    color : {str, RGB tuple}, optional
        Color to show the Genus curves in.
    symbol : str, optional
        Marker used for the curve points.
    '''
    import matplotlib.pyplot as plt

    num = len(self.smoothing_radii)
    # Lay the panels out on a two-column grid.
    num_cols = num // 2 if num % 2 == 0 else (num // 2) + 1

    for panel in range(num):
        if num == 1:
            ax = plt.subplot(111)
        else:
            ax = plt.subplot(num_cols, 2, panel + 1)

        # Annotate each panel with its smoothing radius.
        ax.text(0.3, 0.1,
                "Smooth Size: {0:.2f}".format(self.smoothing_radii[panel]),
                transform=ax.transAxes, fontsize=12)
        plt.plot(self.thresholds, self.genus_stats[panel],
                 "{}-".format(symbol),
                 color=color)
        plt.grid(True)

        # Only the bottom row of panels keeps x tick labels.
        if (num - panel) <= 2:
            plt.xlabel("Intensity")
        else:
            plt.setp(ax.get_xticklabels(), visible=False)

    plt.tight_layout()

    if save_name is not None:
        plt.savefig(save_name)
        plt.close()
    else:
        plt.show()
def run(self, verbose=False, save_name=None,
        color='r', symbol='o', **kwargs):
    '''
    Compute the full Genus statistic, optionally plotting the curves.

    Parameters
    ----------
    verbose : bool, optional
        Enables plotting.
    save_name : str,optional
        Save the figure when a file name is given. Must have `verbose`
        enabled for plotting.
    kwargs : See `~Genus.make_genus_curve`.
    '''
    self.make_genus_curve(**kwargs)

    if verbose:
        self.plot_fit(save_name=save_name, color=color, symbol=symbol)

    return self
class Genus_Distance(object):
    """
    Distance Metric for the Genus Statistic.

    .. note:: Since the data need to be normalized for the distance metrics,
              there is no option to pass a pre-computed `~Genus` statistic.

    Parameters
    ----------
    img1 : %(dtypes)s
        2D image.
    img2 : %(dtypes)s
        2D image.
    smoothing_radii : list, optional
        Kernel radii to smooth data to. See `~Genus`.
    numpts : int, optional
        Number of thresholds to calculate statistic at. See `~Genus`.
    min_value : `~astropy.units.Quantity` or float or list, optional
        Minimum value to use for Genus statistic. When a two-element list is
        given, the first item is used for `img1` and the second for
        `img2`. See `~Genus`.
    max_value : `~astropy.units.Quantity` or float, optional
        Maximum value to use for Genus statistic. When a two-element list is
        given, the first item is used for `img1` and the second for
        `img2`. See `~Genus`.
    lowdens_percent : float, optional
        Lowest percentile of the data to use for Genus statistic.
        When a two-element list is given, the first item is used for
        `img1` and the second for `img2`. See `~Genus`.
    highdens_percent : float, optional
        Highest percentile of the data to use for Genus statistic.
        When a two-element list is given, the first item is used for
        `img1` and the second for `img2`. See `~Genus`.
    genus_kwargs : dict, optional
        Dictionary passed to `~Genus.run`.
    genus2_kwargs : None or dict, optional
        Dictionary passed to `~Genus.run` for `img2`. When `None` is given,
        settings from `genus_kwargs` are used for `img2`.
    """

    __doc__ %= {"dtypes": " or ".join(common_types + twod_types)}

    def __init__(self, img1, img2, smoothing_radii=None, numpts=100,
                 min_value=None, max_value=None, lowdens_percent=0,
                 highdens_percent=100,
                 genus_kwargs=None, genus2_kwargs=None):

        # Avoid the shared-mutable-default pitfall; previously the default
        # was `genus_kwargs={}`.
        if genus_kwargs is None:
            genus_kwargs = {}

        # Check if list for inputs, where first is for img1 and second is
        # for img2
        if not isinstance(min_value, list):
            min_value = [min_value] * 2
        if not isinstance(max_value, list):
            max_value = [max_value] * 2
        if not isinstance(lowdens_percent, list):
            lowdens_percent = [lowdens_percent] * 2
        if not isinstance(highdens_percent, list):
            highdens_percent = [highdens_percent] * 2

        if genus2_kwargs is None:
            genus2_kwargs = genus_kwargs

        # Standardize the intensity values in the images
        img1, hdr1 = input_data(img1)
        img2, hdr2 = input_data(img2)

        img1 = standardize(img1)
        img2 = standardize(img2)

        # BUG FIX: `numpts` was accepted and documented but never forwarded,
        # so both Genus objects always used their own default.
        self.genus1 = Genus(img1, smoothing_radii=smoothing_radii,
                            numpts=numpts,
                            min_value=min_value[0], max_value=max_value[0],
                            lowdens_percent=lowdens_percent[0],
                            highdens_percent=highdens_percent[0])
        self.genus1.run(**genus_kwargs)

        self.genus2 = Genus(img2, smoothing_radii=smoothing_radii,
                            numpts=numpts,
                            min_value=min_value[1], max_value=max_value[1],
                            lowdens_percent=lowdens_percent[1],
                            highdens_percent=highdens_percent[1])
        self.genus2.run(**genus2_kwargs)

        # When normalizing the genus curves for the distance metric, find
        # the scaling between the angular size of the grids.
        self.scale = common_scale(WCS(hdr1), WCS(hdr2))

    def distance_metric(self, verbose=False, label1=None, label2=None,
                        save_name=None, color1='b', color2='g',
                        marker1='D', marker2='o'):
        '''
        Data is centered and normalized (via normalize).
        The distance is the difference between cubic splines of the curves.

        All values are normalized by the area of the image they were
        calculated from.

        Parameters
        ----------
        verbose : bool, optional
            Enables plotting.
        label1 : str, optional
            Object or region name for img1
        label2 : str, optional
            Object or region name for img2
        save_name : str,optional
            Save the figure when a file name is given.
        '''

        # 2 times the average number between the two
        num_pts = \
            int((len(self.genus1.thresholds) +
                 len(self.genus2.thresholds)) / 2)

        # Evaluate both splines over the overlapping threshold range.
        min_pt = max(np.min(self.genus1.thresholds),
                     np.min(self.genus2.thresholds))

        max_pt = min(np.max(self.genus1.thresholds),
                     np.max(self.genus2.thresholds))

        points = np.linspace(min_pt, max_pt, 2 * num_pts)

        # Divide each by the area of the data. genus1 is additionally
        # adjusted by the scale factor of the angular size between the
        # datasets.
        genus1 = self.genus1.genus_stats[0, :] / \
            float(self.genus1.data.size / self.scale)
        genus2 = self.genus2.genus_stats[0, :] / float(self.genus2.data.size)

        interp1 = \
            InterpolatedUnivariateSpline(self.genus1.thresholds,
                                         genus1, k=3)
        interp2 = \
            InterpolatedUnivariateSpline(self.genus2.thresholds,
                                         genus2, k=3)

        # Distance is the L2 norm of the spline difference on the grid.
        self.distance = np.linalg.norm(interp1(points) -
                                       interp2(points))

        if verbose:
            import matplotlib.pyplot as plt

            plt.plot(self.genus1.thresholds, genus1, color=color1,
                     marker=marker1,
                     label=label1)
            plt.plot(self.genus2.thresholds, genus2, color=color2,
                     marker=marker2,
                     label=label2)
            plt.plot(points, interp1(points), color1)
            plt.plot(points, interp2(points), color2)
            plt.xlabel("z-score")
            plt.ylabel("Genus Score")
            plt.grid(True)
            plt.legend(loc="best")

            if save_name is not None:
                plt.savefig(save_name)
                plt.close()
            else:
                plt.show()

        return self
def GenusDistance(*args, **kwargs):
    '''
    Deprecated alias for `~Genus_Distance`, kept for backwards
    compatibility.
    '''
    warn("Use the new 'Genus_Distance' class. 'GenusDistance' is deprecated and will"
         " be removed in a future release.", Warning)

    return Genus_Distance(*args, **kwargs)
def remove_small_objects(arr, min_size, connectivity=8):
    '''
    Remove connected regions smaller than the given size, in-place.

    Function is based on skimage.morphology.remove_small_objects.

    Parameters
    ----------
    arr : numpy.ndarray
        Binary array containing the mask. Modified in-place.
    min_size : int
        Smallest allowed size.
    connectivity : int, optional
        Connectivity of the neighborhood, passed to
        `scipy.ndimage.generate_binary_structure`. Values above ``arr.ndim``
        yield full connectivity (so the default of 8 gives a 3x3 structure
        for a 2D array).

    Returns
    -------
    arr : numpy.ndarray
        The input array with undersized regions zeroed out.
    '''
    struct = nd.generate_binary_structure(arr.ndim, connectivity)

    labels, num = nd.label(arr, struct)

    if num == 0:
        return arr

    # Vectorized replacement for the old per-label np.where loop: compute
    # all region sizes at once, then zero every pixel whose label is too
    # small in a single masked assignment.
    sizes = np.asarray(nd.sum(arr, labels, range(1, num + 1)))
    too_small = sizes < min_size
    if too_small.any():
        # labels == 0 is background; exclude it before indexing by label.
        arr[(labels > 0) & too_small[labels - 1]] = 0

    return arr
|
Astroua/TurbuStat
|
turbustat/statistics/genus/genus.py
|
Python
|
mit
| 19,650
|
[
"Gaussian"
] |
b70ebe134e0e95c2ada8cd897d560f909bb6677f2970b082653ad2f9b75bceab
|
"""
Message Queue wrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from pythonjsonlogger.jsonlogger import JsonFormatter as libJsonFormatter
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
DEFAULT_MQ_LEVEL = 'verbose'
# These are the standard logging fields that we want to see
# in the json. All the non default are printed anyway
DEFAULT_FMT = '%(levelname)s %(message)s %(asctime)s'
class MessageQueueBackend(AbstractBackend):
    """
    MessageQueueBackend glues together the handler and formatter concepts
    from ``logging``:

    - MessageQueueHandler: custom DIRAC handler that ships log records to a
      Message Queue server (FrameworkSys./private/standardlogging/Handler).
    - JsonFormatter: formats records as json for transport, keeping the
      standard fields selected by the 'Format' parameter.
    """

    def __init__(self, backendParams=None):
        """
        Initialization of the MessageQueueBackend
        """
        # 'Format' is forwarded to libJsonFormatter as 'fmt'; it selects
        # which "standard" logging fields survive into the json output
        # (non-default fields are printed regardless).
        backendParams = backendParams or {}
        backendParams.setdefault('Format', DEFAULT_FMT)
        super(MessageQueueBackend, self).__init__(MessageQueueHandler,
                                                  libJsonFormatter,
                                                  backendParams,
                                                  level=DEFAULT_MQ_LEVEL)

    def _setHandlerParameters(self, backendParams=None):
        """
        Get the handler parameters from the backendParams.
        The keys of handlerParams should correspond to the parameter names of the associated handler.
        The method should be overridden in every backend that needs handler parameters.
        The method should be called before creating the handler object.

        :param dict parameters: parameters of the backend. ex: {'FileName': file.log}
        """
        # default value
        queue = ''
        if backendParams is not None:
            queue = backendParams.get('MsgQueue', queue)
        self._handlerParams['queue'] = queue
|
yujikato/DIRAC
|
src/DIRAC/Resources/LogBackends/MessageQueueBackend.py
|
Python
|
gpl-3.0
| 2,542
|
[
"DIRAC"
] |
a8e4f53f1cd711cca6a6f14f7e925f4b0cb61f24e0e716e43ec440b71b6d098b
|
# Copyright (C) 2012-2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008-2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
R"""
This class creates LB-fluid with uniform density ``rho0`` and velocity ``u0`` (*lattice
units*).
Example:
>>> # set initial density and velocity
>>> initDen = 1.
>>> initVel = Real3D( 0. )
>>>
>>> # create initPop object and initialize populations
>>> initPop = espressopp.integrator.LBInitPopUniform(system,lb)
>>> initPop.createDenVel( initDen, initVel )
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.LBInit import *
from _espressopp import integrator_LBInit_PopUniform
class LBInitPopUniformLocal(LBInitLocal, integrator_LBInit_PopUniform):
    # Worker-side part of the PMI pair; the controller proxy is defined below.
    def __init__(self, system, latticeboltzmann):
        # Only construct the C++ backend when PMI is inactive, or on ranks
        # belonging to the PMI CPU group (i.e. actual worker processes).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_LBInit_PopUniform, system, latticeboltzmann)
if pmi.isController :
    # Controller-side proxy: forwards createDenVel calls to the Local
    # objects on the workers via PMI.
    # NOTE(review): uses the Python 2 `__metaclass__` hook; under Python 3
    # this would need `metaclass=` in the class header — confirm the
    # supported interpreter version.
    class LBInitPopUniform(LBInit):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.LBInitPopUniformLocal',
            pmicall = [
                "createDenVel"]
            )
|
kkreis/espressopp
|
src/integrator/LBInitPopUniform.py
|
Python
|
gpl-3.0
| 1,977
|
[
"ESPResSo"
] |
d082562a037173c1aa612350bd186b7bcd41e5071167d67d3b15e6d565cb2f26
|
import chempy
import copy
from chempy.models import Indexed
from Numeric import *
from Precision import *
class FastModel:
    """
    Array-backed ("fast") representation of a chempy model.

    Atom and bond attributes are packed into Numeric arrays:
      txta - per-atom fixed-width text fields (column layout in `as_`)
      inta - per-atom integer fields (column map `ai`)
      flta - per-atom float fields (column map `af`)
      crda - per-atom xyz coordinates
      bnda - per-bond [index0, index1, order, stereo] rows (columns `bi_*`)
    """

#------------------------------------------------------------------------------
    def __init__(self):
        self.reset()

#------------------------------------------------------------------------------
    def reset(self):
        # Empty model: no atoms, all packed arrays unallocated.
        self.nAtom = 0
        self.molecule = chempy.Molecule()
        self.txta = None  # fixed-width text fields
        self.inta = None  # integer fields
        self.flta = None  # float fields
        self.crda = None  # coordinates
        self.bnda = None  # bond table

#------------------------------------------------------------------------------
    def from_indexed(self,model):
        """Pack a chempy Indexed model into the flat array representation."""
        self.reset()
        self.nAtom = model.nAtom
        self.nBond = model.nBond
        self.molecule = copy.deepcopy(model.molecule)
        # Allocate the packed arrays (widths defined at module level below).
        self.txta = resize(array(' ','c'),(self.nAtom,as_width))
        self.inta = zeros((self.nAtom,ai_width),Int32)
        self.flta = zeros((self.nAtom,af_width),Float)
        self.bnda = zeros((self.nBond,bi_width),Int32)
        self.crda = zeros((self.nAtom,3),Float)
        c = 0
        for a in model.atom:
            # Fixed-width text record; field widths must match the `as_` map.
            txt = "%-4s%-2s%-1s%-4s%-1s%-4s%-1s%-4s%-20s" % \
                  (a.name[0:4],a.symbol[0:2],a.alt[0:1],a.resn[0:4],
                   a.resn_code[0:1],a.resi[0:4],a.chain[0:1],
                   a.segi[0:4],a.text_type[0:20])
            self.txta[c] = txt
            # Column order must match the `ai` / `af` maps below.
            self.inta[c] = [ a.resi_number, a.hetatm, a.formal_charge,
                             a.flags, a.color_code, a.stereo, a.numeric_type ]
            self.flta[c] = [ a.b, a.q, a.partial_charge, a.vdw ]
            self.crda[c] = [ a.coord[0], a.coord[1], a.coord[2] ]
            c = c + 1
        c = 0
        for b in model.bond:
            self.bnda[c] = [ b.index[0], b.index[1], b.order, b.stereo ]
            c = c + 1

#------------------------------------------------------------------------------
    def convert_to_indexed(self):
        """Unpack the flat arrays back into a chempy Indexed model."""
        # NOTE(review): uses `xrange` and `string.strip`/`string.join`
        # (Python 2 era); the `string` module import is not visible in this
        # chunk — confirm it is imported at module top.
        model = Indexed()
        model.molecule = copy.deepcopy(self.molecule)
        for c in xrange(self.nAtom):
            at = chempy.Atom()
            txta = self.txta[c]
            # Slice each fixed-width text field back out via the `as_` map.
            for attrib in ( 'name', 'symbol', 'resn', 'resn_code', 'resi',
                            'alt', 'chain', 'segi', 'text_type' ):
                ll = as_[attrib]
                setattr(at,attrib,string.strip(string.join(txta[ll[0]:ll[1]],'')))
            inta = self.inta[c]
            for attrib in ( 'resi_number', 'hetatm', 'formal_charge','flags',
                            'color_code', 'stereo', 'numeric_type' ):
                setattr(at,attrib,inta[ai[attrib]])
            flta = self.flta[c]
            # NOTE(review): 'vdw' is packed but not unpacked here — confirm
            # whether that is deliberate.
            for attrib in ( 'b', 'q', 'partial_charge' ) :
                setattr(at,attrib,flta[af[attrib]])
            crda = self.crda[c]
            at.coord = [crda[0],crda[1],crda[2]]
            # probably need to add some checking here to eliminate values
            # which come back as defaults
            model.atom.append(at)
        for c in xrange(self.nBond):
            bnd = chempy.Bond()
            bnda = self.bnda[c]
            bnd.index = [bnda[bi_index0],bnda[bi_index1]]
            bnd.order = bnda[bi_order]
            bnd.stereo = bnda[bi_stereo]
            model.bond.append(bnd)
        return model
#------------------------------------------------------------------------------
# text properties
# --- text properties: attribute name -> [start, end) column span in txta ---
_as_fields = (('name', 4), ('symbol', 2), ('alt', 1), ('resn', 4),
              ('resn_code', 1), ('resi', 4), ('chain', 1), ('segi', 4),
              ('text_type', 20))
as_ = {}
as_width = 0
for _field, _chars in _as_fields:
    as_[_field] = [as_width, as_width + _chars]
    as_width = as_width + _chars

# --- integer properties: attribute name -> column index in inta ---
_ai_fields = ('resi_number', 'hetatm', 'formal_charge', 'flags',
              'color_code', 'stereo', 'numeric_type')
ai = dict(zip(_ai_fields, range(len(_ai_fields))))
ai_width = len(_ai_fields)

# --- float properties: attribute name -> column index in flta ---
_af_fields = ('b', 'q', 'partial_charge', 'vdw')
af = dict(zip(_af_fields, range(len(_af_fields))))
af_width = len(_af_fields)

# --- bond information: column indices in bnda ---
bi_index0 = 0
bi_index1 = 1
bi_order = 2
bi_stereo = 3
bi_width = 4
|
gratefulfrog/lib
|
python/chempy/fast/__init__.py
|
Python
|
gpl-2.0
| 5,069
|
[
"ChemPy"
] |
b78f764b70c7e7f7db595810ccd1d66071917f8250eb17fb0efe8413c06ad4ee
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Base ADF editor
##################
"""
from exatomic import Editor as AtomicEditor
class Editor(AtomicEditor):
    """ADF editor: tags parsed output metadata as ADF (non-Gaussian)."""

    def __init__(self, *args, **kwargs):
        super(Editor, self).__init__(*args, **kwargs)
        adf_meta = {'program': 'adf', 'gaussian': False}
        if self.meta is None:
            self.meta = adf_meta
        else:
            self.meta.update(adf_meta)
|
avmarchenko/exatomic
|
exatomic/adf/editor.py
|
Python
|
apache-2.0
| 579
|
[
"ADF",
"Gaussian"
] |
afbfce5c667f59bc5ec91b9d49ded90a268ff43c841836fe12b4695a5130b546
|
#!/usr/bin/python
usage = """recover.py [--options] data.pkl"""
description = """written to recover populations of events from poisson series"""
author = "R. Essick"

import os
import numpy as np
import pickle

import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt

from optparse import OptionParser

#=================================================
# figure geometry (inches) and axes rectangles: [left, bottom, width, height]
figwidth = 15
figheight = 8
axpos = [0.15, 0.15, 0.8, 0.8]
# side-by-side panel positions for the two-panel figures
axpos1 = [0.15, 0.15, 0.35, 0.8]
axpos2 = [0.50, 0.15, 0.35, 0.8]

#=================================================
# command-line interface
parser = OptionParser(usage=usage, description=description)

parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option("-g", "--grid", default=False, action="store_true")
# ignore coincidence windows (taus) larger than this value
parser.add_option("", "--max-tau", default=np.infty, type="float")
parser.add_option("-t", "--tag", default="", type="string")
parser.add_option("-o", "--output-dir", default="./", type="string")

opts, args = parser.parse_args()

#=================================================
# require exactly one positional argument: the pickled data file
if len(args) != 1:
    raise ValueError("please supply exactly 1 argument")
datafilename = args[0]

# prefix the tag with an underscore for use in output filenames
if opts.tag:
    opts.tag = "_%s"%opts.tag

if not os.path.exists(opts.output_dir):
    os.makedirs(opts.output_dir)
#=================================================
if opts.verbose:
    print "===================================================="
    print " loading data from %s"%(datafilename)
    print "===================================================="
# the pickle holds two objects: the simulation parameters, then the trials
file_obj = open(datafilename, "r")
params = pickle.load(file_obj)
data = pickle.load(file_obj)
file_obj.close()

### read off taus
### assumes that we have at least one trial
taus = np.array( sorted([key for key in data[0].keys() if isinstance(key, (int,float)) if key <= opts.max_tau]) )

Ndata = len(data)
Ntaus = len(taus)

### compute expected rates
dur = params["dur"]
rateS = params["rateS"]
rateA = params["rateA"]
rateB = params["rateB"]

# expected coincidence rates as a function of the coincidence window tau
rateC = {}
rateCp = {}
rateCm = {}
for tau in taus:
    # zero-lag coincidences: signal plus accidentals
    rateC[tau] = rateS + 2*tau*rateA*rateB
    # "plus" (slide) estimate: both streams still contain the signal events
    rateCp[tau] = 2*tau*(rateA+rateS)*(rateB+rateS)
    rate_accident = 2*tau*rateA*rateB
    # "minus" estimate: accidental rate subtracted from each stream first
    rateCm[tau] = 2*tau*(rateA-rate_accident)*(rateB-rate_accident)

#=================================================
if opts.verbose:
    print "===================================================="
    print " fitting each noise instantiation"
    print "===================================================="
### observed rates
orateC = np.array([[datum[tau]["num_C"]/dur for tau in taus] for datum in data])
orateCp = np.array([[datum[tau]["num_Cp"]/datum[tau]["slideDur"] for tau in taus] for datum in data])
orateCm = np.array([[datum[tau]["num_Cm"]/datum[tau]["slideDur"] for tau in taus] for datum in data])

### fit for Cm
"""
taus2 = np.sum( taus**2 )
taus3 = np.sum( taus**3 )
taus4 = np.sum( taus**4 )
det = taus2*taus4 - taus3**2
ratetaus = np.sum(orateCm * taus, axis=1 )
ratetaus2 = np.sum( orateCm * taus**2, axis=1 )
a = (taus4* ratetaus - taus3*ratetaus2)/det
b = (-taus3*ratetaus + taus2*ratetaus2)/det
### check chi2 for goodness of fit
chi2 = np.sum( (orateCm - np.outer(a, taus) - np.outer(a, taus**2))**2/(np.outer(a, taus) + np.outer(a, taus**2)), axis=1)
if np.any(chi2 > 0.01):
    raise StandardError("chi2 is too big for some fits!")
"""
### fit a third order polynomial to relieve pressure on the quadratic term?
### DOES NOT WORK WELL and if anything hurts the measurement of the quadratic term.
# quadratic fit of rate/tau vs tau; highest-order coefficient is discarded
_, Cm_b, Cm_a = np.polyfit( taus, np.transpose(orateCm/taus, (1,0)), 2) ### no biases, but big variances
#Cm_b, Cm_a = np.polyfit( taus, np.transpose(orateCm/taus, (1,0)), 1) ### introduces a bias in the quadratic term

### fit for Cp
#Cp_a = np.polyfit( taus, np.transpose(orateCp/taus, (1,0)), 0) ### linear with no offset
Cp_a = 1.0*np.sum(orateCp/taus, axis=1)/Ntaus ### faster computation?

### fit for C
C_a, C_o = np.polyfit( taus, np.transpose(orateC, (1,0)), 1) ### linear with offset
#=================================================
if opts.verbose:
    print "plotting fit parameters"

### histogram a, b values
fig = plt.figure(figsize=(figwidth,figheight))
axa = fig.add_axes(axpos1)
axb = fig.add_axes(axpos2)

figS = plt.figure(figsize=(figwidth,figheight))
ax = figS.add_axes(axpos)

figCp = plt.figure(figsize=(figwidth,figheight))
axCp = figCp.add_axes(axpos)

figC = plt.figure(figsize=(figwidth, figheight))
axC1 = figC.add_axes(axpos1)
axC2 = figC.add_axes(axpos2)

# roughly ten samples per bin, with at least one bin
nbins = max(Ndata/10, 1)

### plot
# histograms of fractional errors in the recovered coefficients
# (0 corresponds to perfect recovery)
axa.hist( 1 - Cm_a/(2*rateA*rateB), nbins, histtype="step" )
axb.hist( 1 - Cm_b/(-4*rateA*rateB*(rateA+rateB)), nbins, histtype="step" )
ax.plot( 1 - Cm_a/(2*rateA*rateB), 1 - Cm_b/(-4*rateA*rateB*(rateA+rateB)), marker="o", markerfacecolor="none", linestyle="none")

axCp.hist( 1 - Cp_a/(2*(rateA+rateS)*(rateB+rateS)), nbins, histtype="step")
ylim = axCp.get_ylim()
# expected bias of the C+ estimator when a signal is present
x = 1 - rateA*rateB/((rateA+rateS)*(rateB+rateS))
axCp.plot( x*np.ones(2), ylim, "k--")
axCp.text(x, ylim[1], "$1-\\frac{\lambda_A\lambda_B}{\left(\lambda_A+\lambda_S\\right)\left(\lambda_B + \lambda_S\\right)}$", ha="left", va="top")

axC1.hist( 1 - C_a/(2*(rateA*rateB + rateS*rateB + rateA*rateS)), nbins, histtype="step")
axC2.hist( 1 - C_o/rateS, nbins, histtype="step")

### label
axa.set_ylabel("count")
axa.set_xlabel("$1 - \\frac{a}{2\lambda_A\lambda_B}$")
axb.set_ylabel("count")
axb.set_xlabel("$1 - \\frac{b}{-4\lambda_A\lambda_B\left(\lambda_A+\lambda_B\\right)}$")
axb.yaxis.tick_right()
axb.yaxis.set_label_position("right")

ax.set_ylabel("$1 - \\frac{b}{-4\lambda_A\lambda_B\left(\lambda_A+\lambda_B\\right)}$")
ax.set_xlabel("$1 - \\frac{a}{2\lambda_A\lambda_B}$")

axCp.set_ylabel("count")
axCp.set_xlabel("$1 - \\frac{a}{2\left(\lambda_A+\lambda_S\\right)\left(\lambda_B + \lambda_S\\right)}$")

axC1.set_ylabel("count")
axC1.set_xlabel("$1 - \\frac{a}{2\left(\lambda_A\lambda_B+\lambda_A\lambda_S+\lambda_S\lambda_B\\right)}$")
axC2.set_ylabel("count")
axC2.set_xlabel("$1 - \\frac{o}{\lambda_S}$")
axC2.yaxis.tick_right()
axC2.yaxis.set_label_position("right")

### decorate
axa.grid(opts.grid)
axb.grid(opts.grid)
ax.grid(opts.grid)
axCp.grid(opts.grid)
axC1.grid(opts.grid)
axC2.grid(opts.grid)

### save
figname = "%s/Cm-fit_params-hist%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
    print "\t", figname
fig.savefig(figname)
plt.close(fig)

figname = "%s/Cm-fit_params-scatter%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
    print "\t", figname
figS.savefig(figname)
plt.close(figS)

figname = "%s/Cp-fit_params-hist%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
    print "\t", figname
figCp.savefig(figname)
plt.close(figCp)

figname = "%s/C-fit_params-hist%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
    print "\t", figname
figC.savefig(figname)
plt.close(figC)

#=================================================
# planned-work notes kept as module-level strings in the original
"""
perform a hypothesis test on C+ data using fit parameters -> p-values
fit C+ data to extract "(rateA+rateS)*(rateB+rateS)"
perform null test with C- data as distribution. Need to include fitting uncertainty from C+ params
perform a hypothesis test on C data using fit parameters -> p-values
fit C data to extract "rateS" and "rateA*rateB"
perform null test on "rateS" to see whether we can detect a signal this way
perform null test on rateA*rateB to using C- data as distribution. Need to include fitting uncertainty from C params
Can we write a "quick" MCMC for (rateA, rateB, rateS) that attempts to fit the data and recover parameters?
need distributions of errors for each point along the "X vs. tau" curves.
gaussian?
errors are correlated between different points...
Instead, just use a "joint chi2" minimization using all data.
still need relative variances on data points to weight the chi2 appropriately.
"""
#=================================================
"""
Compare the sensitivity of these different methods as a function of rateA, rateB, dur, number of slides, etc.
quote the fraction of trials for which we detected a signal.
rinse and repeat with various parametric combinations -> sigmoid detection curve
rinse and repeat with various dur -> require observing time to detect a given population
"""
|
reedessick/populations
|
recover.py
|
Python
|
gpl-2.0
| 8,214
|
[
"Gaussian"
] |
bd55d1063f830934e240452b64c902f964ec7c90fa4e0b4f5f6498640a471186
|
import io
import os
import re
from distutils.core import setup
def read(path, encoding='utf-8'):
    """Return the text of *path*, resolved relative to this file's directory."""
    full_path = os.path.join(os.path.dirname(__file__), path)
    with io.open(full_path, encoding=encoding) as fp:
        return fp.read()
def version(path):
    """Obtain the packge version from a python file e.g. pkg/__init__.py

    See <https://packaging.python.org/en/latest/single_source_version.html>.
    """
    contents = read(path)
    match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
                      contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# package metadata for distutils
DESCRIPTION = "General tools for Astronomical Time Series in Python"
LONG_DESCRIPTION = """
gatspy: General tools for Astronomical Time Series in Python
============================================================
Gatspy (pronounced as F. Scott Fitzgerald might pronounce it) is a collection of tools for analyzing astronomical time series in Python.
For more information, visit http://github.com/astroml/gatspy/
"""
NAME = "gatspy"
AUTHOR = "Jake VanderPlas"
AUTHOR_EMAIL = "jakevdp@uw.edu"
MAINTAINER = "Jake VanderPlas"
MAINTAINER_EMAIL = "jakevdp@uw.edu"
URL = 'http://github.com/astroml/gatspy'
DOWNLOAD_URL = 'http://github.com/astroml/gatspy'
LICENSE = 'BSD 3-clause'

# single-sourced version, read from the package __init__ at build time
VERSION = version('gatspy/__init__.py')

setup(name=NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      url=URL,
      download_url=DOWNLOAD_URL,
      license=LICENSE,
      packages=['gatspy',
                'gatspy.tests',
                'gatspy.periodic',
                'gatspy.periodic.tests',
                'gatspy.datasets',
                'gatspy.datasets.tests',
                ],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: BSD License',
          'Natural Language :: English',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5'],
      )
|
astroML/gatspy
|
setup.py
|
Python
|
bsd-2-clause
| 2,320
|
[
"VisIt"
] |
468a4dd5c652f6450b066ce2e5892f19675e19abf39cdec35eae30adf57d0641
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# This example classifies movie reviews as positive or negative using the text of the review,
# and is adapted from
# https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/keras/basic_text_classification.ipynb
import tensorflow as tf
from tensorflow import keras
import argparse
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.learn.tf.estimator import Estimator
# Parse command-line options.
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_mode', type=str, default="local",
                    help='The mode for the Spark cluster. local or yarn.')
args = parser.parse_args()
cluster_mode = args.cluster_mode

# Initialise the Orca context for the requested Spark backend.
if cluster_mode == "local":
    init_orca_context(cluster_mode="local", cores=4, memory="3g")
elif cluster_mode == "yarn":
    init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=2, driver_memory="3g")
else:
    # Fail fast: the original script only printed a message and carried on
    # without an initialised context, deferring the failure to a much less
    # obvious point later in the run.
    raise ValueError("init_orca_context failed. cluster_mode should be either "
                     "'local' or 'yarn' but got " + cluster_mode)

print(tf.__version__)

# Download the IMDB reviews dataset, keeping the 10,000 most frequent words.
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
print(train_data[0])

# A dictionary mapping words to an integer index.
word_index = imdb.get_word_index()
# The first indices are reserved for special tokens.
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])


def decode_review(text):
    """Translate a sequence of word indices back into a readable review."""
    return ' '.join([reverse_word_index.get(i, '?') for i in text])


# Pad (or truncate) every review to exactly 256 tokens so they can be batched.
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                       value=word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)

# Build the model: embedding -> average pooling -> dense(relu) -> sigmoid.
# Input shape is the vocabulary count used for the movie reviews (10,000 words).
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])

# Hold out the first 10,000 reviews as a validation set.
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
train_dataset = tf.data.Dataset.from_tensor_slices((partial_x_train, partial_y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))

# Train and evaluate through an Orca TensorFlow Estimator.
est = Estimator.from_keras(keras_model=model)
est.fit(data=train_dataset,
        batch_size=512,
        epochs=100,
        validation_data=validation_dataset)
results = est.evaluate(validation_dataset)
print(results)
stop_orca_context()
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/examples/orca/learn/tf/basic_text_classification/basic_text_classification.py
|
Python
|
apache-2.0
| 5,848
|
[
"ORCA"
] |
a2f14f3379aac7836c398ef246d3b510316dd183bb0c0815dcbe8bdff1b0a846
|
from __future__ import division
from builtins import map
from builtins import str
from builtins import range
import os
import re
import pickle
import logging
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from past.utils import old_div
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
from PyAstronomy import pyasl
from astropy.io import fits, ascii
#from .interpol_function import interpol
from AtmosInterpol import interpol
from uncertainties import unumpy, ufloat, umath
plt.style.use(['seaborn-muted'])
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
#******************************************************************************
#******************************************************************************
class Vsini:
    """
    Estimates the projected rotational velocity (vsini) and line abundance of
    a single spectral line by iteratively comparing MOOG synthetic spectra
    against an observed spectrum.

    spec_window is the spectral analysis window, a 1x2 numpy array
    gauss is the instrumental broadening parameter
    v_macro is the macroturbulence velocity
    line_file is the name of the file containing the chosen lines
    line is which of the lines on the previous file to work on
    SN is the signal-to-noise ratio of the spectrum
    # x_vel is the velocity shift to be applied to the x-axis
    # x_wl is the wavelengths shift to be applied to the x-axis
    # y_add is the additive shift to be applied to the spectrum
    # y_mult is the multiplicative shift to be applied to the spectrum
    # perf_radius is the number of points around the line center where
    # to evaluate the performance of the synthetic spectrum
    # bwing_w is the weight to be applied to the blue side of the line
    # when evaluating the performance
    # rwing_w is the weight to be applied to the red side of the line
    # when evaluating the performance
    # center_w is the weight to be applied to the line center when
    # evaluating the performance
    # Maximum number of points around the performance radius that are
    # allowed to be a bad fit (1 S/N sigma lower than observed signal)
    # If this limit is exceeded, the variable badfit_status will return
    # True after running find()
    # For high precision spectrum, set this to a very low number
    """

    def __init__(self, spec_window, gauss, v_macro, line_file, line, SN,
                 **kwargs):
        # User-tunable shifts and performance weights (see class docstring).
        self.name = kwargs.get('star_name', 'Unnamed star')
        self.vshift = kwargs.get('x_vel', 0.0)
        self.xshift = kwargs.get('x_wl', 0.0)
        self.yadd = kwargs.get('y_add', 0.0)
        self.ymult = kwargs.get('y_mult', 1.0)
        self.radius = kwargs.get('perf_radius', 10)
        self.bwing_w = kwargs.get('bwing_w', 3.0)
        self.rwing_w = kwargs.get('rwing_w', 5.0)
        self.center_w = kwargs.get('center_w', 25.0)
        self.badfit_tol = kwargs.get('badfit_tol', 10)
        # Speed of light in angstrom/s (used for the Doppler x-axis shift).
        self.c = 2.998E18
        self.am = arr_manage()
        self.spec = spec_window
        self.gauss = gauss
        self.v_m = v_macro
        self.lines = np.loadtxt(line_file, skiprows=1, usecols=(0, 1))
        # A single-line file loads as a 1-D array; the IndexError fallback
        # handles that case.
        try:
            self.Z = self.lines[line, 1]
            self.line_center = self.lines[line, 0]
        except IndexError:
            self.Z = self.lines[1]
            self.line_center = self.lines[0]
        # 1-sigma flux noise estimated from the signal-to-noise ratio.
        self.spec_sigma = 1./SN
        self.data = np.loadtxt('./Spectra/%s_%d.dat' % (self.name, line))
        # NOTE(review): data_new aliases data (no copy); perf()/perf_new()
        # write into data_new and restore self.data afterwards — confirm
        # the aliasing is intentional.
        self.data_new = self.data
        self.line_number = line
        # Other attributes that will be properly assigned in other functions
        self.data_target = []
        self.center_index = 0
        self.ci0 = 0
        self.ci1 = 0
        self.MOOG = None
        self.check = None
        self.pts = 15
        self.pace = np.array([2.0, 2.0])
        self.a_guess = np.array([-0.100, 0.100])
        self.v_guess = np.array([0.5, 25.0])
        self.min_i = 3
        self.max_i = 21
        self.limits = np.array([0.01, 0.001])
        self.plot = True
        self.v_low_limit = 0.5
        self.save = None
        self.silent = True
        self.best_a = np.nan
        self.best_v = np.nan
        self.it = 0
        self.finish = False
        self.badfit_status = False
        self.it2 = 0
        self.best_v_antes = np.nan
        self.best_a_antes = np.nan
        self.v_grid = []
        self.S = []
        self.S_v = []
        self.yfit_v = []
        self.intern_u = 0
        self.best_v_ind = 0
        self.a_grid = []
        self.S_a = []
        self.yfit_a = []
        self.best_a_ind = 0
        self.go_v = 0
        self.go_a = 0
        self.v_change = None
        self.a_change = None
        self.v_width = np.nan
        self.a_width = np.nan

    def perf_new(self, v, a, mode='vsini'):
        """
        The performance function: first it creates the params.txt file, then runs
        moog in silent mode, interpolates the generated model to the points of
        the observed spectrum, and then simply calculates the sum of squared
        differences, weighted by the inverse of the observed spectrum to the power
        of alpha.

        With mode='vsini', ``v`` is a grid of vsini values and ``a`` a single
        abundance; with mode='abundance' the roles are reversed.  Returns the
        array of weighted goodness-of-fit values S (lower is better).
        """
        data_old = np.copy(self.data)
        data_n = np.copy(self.data)
        # Apply wavelength + Doppler shifts to the x-axis, and the
        # additive/multiplicative corrections to the flux.
        self.data_new[:, 0] = data_n[:, 0] + self.xshift - data_n[:, 0] * \
            (old_div(self.c, (self.vshift*1E13 + self.c)) - 1.0)
        self.data_new[:, 1] = data_n[:, 1] * self.ymult + self.yadd
        self.data_target = self.am.x_set_limits(self.spec[0], self.spec[1],
                                                self.data_new)
        self.center_index = self.am.find_index(self.line_center, self.data_target[:, 0])
        self.ci0 = self.center_index - self.radius
        self.ci1 = self.center_index + self.radius+1
        # Shrink the performance radius until [ci0, ci1) fits inside the window.
        if 2.*self.radius > len(self.data_target[:, 1]):
            self.radius = int(np.floor(old_div(len(self.data_target[:, 0]), 2)) - 1)
            self.ci0 = self.center_index - self.radius
            self.ci1 = self.center_index + self.radius+1
        if self.ci1 > len(self.data_target[:, 0]):
            resto = int(np.ceil((self.ci1 - len(self.data_target[:, 0]))))
            self.radius -= resto
            self.ci0 = self.center_index - self.radius
            self.ci1 = self.center_index + self.radius+1
        if self.ci0 < 0:
            self.radius -= (self.radius - self.center_index)
            self.ci0 = self.center_index - self.radius
            self.ci1 = self.center_index + self.radius+1
        if mode == 'vsini':
            S = np.inf * np.ones(v.size)
            self.MOOG.abunds = a
            self.MOOG = self.MOOG.change_vsini(v)
            for k, vsini in enumerate(v):
                model_v = self.MOOG.model_vsini[str(vsini)]
                # NOTE(review): all() returns a Python bool, so ~all(...) is
                # bitwise NOT (-1 or -2), which is ALWAYS truthy — the NaN
                # guard never skips a model. Verify intent (`not all(...)`?).
                if ~all(np.isnan(model_v.T[1])):
                    model_interp = np.interp(self.data_target[self.ci0:self.ci1, 0],
                                             model_v.T[0], model_v.T[1])
                    # Weight vector: blue wing / center / red wing weights.
                    w = np.zeros(2 * self.radius + 1, float)
                    if self.ci1 > len(self.data_target[:, 0]):
                        w = np.zeros(2 * self.radius, float)
                    w[:self.radius-3] = self.bwing_w
                    w[self.radius+4:] = self.rwing_w
                    w[self.radius-3:self.radius+4] = self.center_w
                    S[k] = np.sum(w * (self.data_target[self.ci0:self.ci1, 1] -
                                       model_interp)**2.) / np.sum(w)
                    del model_interp, w
                del model_v
        else:
            S = np.inf * np.ones(a.size)
            self.MOOG.vsini = v
            self.MOOG.model_ab = {}
            for k, val in enumerate(a):
                self.MOOG = self.MOOG.change_ab(val)
                model_a = self.MOOG.model_ab[str(val)]
                # NOTE(review): same always-truthy ~all(...) issue as above.
                if ~all(np.isnan(model_a[:, 1])):
                    model_interp = np.interp(self.data_target[self.ci0:self.ci1, 0],
                                             model_a[:, 0], model_a[:, 1])
                    # Abundance mode uses a slightly wider center window
                    # (radius±2/±3) than vsini mode (radius±3/±4).
                    w = np.zeros(2 * self.radius + 1, float)
                    if self.ci1 > len(self.data_target[:, 0]):
                        w = np.zeros(2 * self.radius, float)
                    w[:self.radius-2] = self.bwing_w
                    w[self.radius+3:] = self.rwing_w
                    w[self.radius-2:self.radius+3] = self.center_w
                    S[k] = np.sum(w * (self.data_target[self.ci0:self.ci1, 1] -
                                       model_interp)**2) / np.sum(w)
                    del model_interp, w
                del model_a
        # Restore the untouched observed spectrum.
        self.data = data_old
        del data_old, data_n, self.data_target
        return S

    def perf(self, p):
        """
        Evaluate a single (vsini, abundance) pair ``p`` by running MOOG once
        and returning the weighted sum of squared residuals around the line
        center.  Also sets self.check to the number of points lying more
        than one sigma above the model (used for the bad-fit warning).
        """
        data_old = np.copy(self.data)
        data_n = np.copy(self.data)
        self.data_new[:, 0] = data_n[:, 0] + self.xshift - data_n[:, 0] * \
            (old_div(self.c, (self.vshift*1E13 + self.c)) - 1.0)
        self.data_new[:, 1] = data_n[:, 1] * self.ymult + self.yadd
        self.data_target = self.am.x_set_limits(self.spec[0], self.spec[1],
                                                self.data_new)
        # Running MOOGSILENT
        self.MOOG.vsini = p[0]
        self.MOOG.abunds = p[1]
        self.MOOG = self.MOOG.run()
        # Evaluating the performance in a radius around the center of the line
        self.center_index = self.am.find_index(self.line_center,
                                               self.data_target[:, 0])
        self.ci0 = self.center_index - self.radius
        self.ci1 = self.center_index + self.radius+1
        if 2.*self.radius > len(self.data_target[:, 0]):
            self.radius = int(np.floor(old_div(len(self.data_target[:, 0]), 2)) - 1)
            self.ci0 = self.center_index - self.radius
            self.ci1 = self.center_index + self.radius+1
        model_interp = np.interp(self.data_target[self.ci0:self.ci1, 0],
                                 self.MOOG.model[:, 0],
                                 self.MOOG.model[:, 1])
        # Checking the fit on line wings
        self.check = self.data_target[self.ci0:self.ci1, 1] - model_interp
        self.check = len(np.where(self.check > 1.*self.spec_sigma)[0])
        # Creating the weights vector
        w = np.zeros(2 * self.radius + 1, float)
        if self.ci1 > len(self.data_target[:, 0]):
            w = np.zeros(2 * self.radius, float)
        w[:self.radius] = self.bwing_w
        w[self.radius+1:] = self.rwing_w
        w[self.radius] = self.center_w
        S = old_div(np.sum(w * (self.data_target[self.ci0:self.ci1, 1] -
                                model_interp)**2), np.sum(w))
        self.data = data_old
        del data_old, data_n, model_interp, w
        return S

    def find(self, **kwargs):
        """
        Iteratively narrow down the best-fit vsini and abundance by
        alternating 1-D grid searches (vsini at fixed abundance, then
        abundance at fixed vsini), locating each minimum with a quartic
        spline, and shrinking the guess ranges each iteration.

        -N: Number of points to try for each iteration
        -pace: Narrowing factor when going to the next iteration
            pace[0] = narrowing factor for vsini
            pace[1] = narrowing factor for abundance
        -a_guess: Initial guess range for abundance. It has to be a numpy array of
            length = 2
        -v_guess: Initial guess range for vsini. It has to be a numpy array of
            length = 2
        -min_i: Minimum number of iterations
        -max_i: Maximum number of iterations
        -limits: Convergence limits: a numpy array with length 2, corresponding to the
            limits of vsini and abundance, respectively
        -plot: Plot the spectral line fit at the end?
        -v_low_limit: Lower limit of estimation of vsini
        -save: Set 'save' to a filename with an extension (e.g. png, eps)
            Overrides 'plot' to False
        """
        self.pts = kwargs.get('N', 15)
        self.pace = kwargs.get('pace', np.array([2.0, 2.0]))
        self.a_guess = kwargs.get('a_guess', np.array([-0.100, 0.100]))
        self.v_guess = kwargs.get('v_guess', np.array([0.5, 25.0]))
        self.min_i = kwargs.get('min_i', 3)
        self.max_i = kwargs.get('max_i', 21)
        self.limits = kwargs.get('limits', np.array([0.01, 0.001]))
        self.plot = kwargs.get('plot', True)
        self.v_low_limit = kwargs.get('v_low_limit', 0.5)
        self.save = kwargs.get('save', None)
        if 'save' in kwargs:
            self.plot = False
        self.silent = False
        # Initial best guesses: midpoints of the guess ranges.
        self.best_a = np.mean(self.a_guess)
        self.best_v = np.mean(self.v_guess)
        self.it = 1
        self.finish = False
        self.badfit_status = False
        MOOG = Driver(synth_interval=self.spec,
                      abunds=np.array([[self.Z, self.best_a], ]),
                      obs_wl=self.data[:, 0], obs_flux=self.data[:, 1],
                      gauss=self.gauss, macro_v=self.v_m,
                      star_name=self.name, plot=self.plot,
                      savefig=self.save,
                      y_shift_add=self.yadd,
                      y_shift_mult=self.ymult,
                      wl_shift=self.xshift,
                      line_number=self.line_number)
        self.MOOG = MOOG
        # Counts iterations that found an interior spline minimum (v, a).
        self.it2 = [0, 0]
        # NOTE(review): `~self.finish` is bitwise NOT on a Python bool
        # (~False == -1, ~True == -2), both truthy — this term can never
        # stop the loop; termination happens via `break` or the other two
        # conditions. Probably `not self.finish` was intended.
        while ~self.finish and self.it < self.max_i and self.v_guess[1] < 100.:
            self.MOOG.it = self.it
            self.best_v_antes = self.best_v
            self.best_a_antes = self.best_a
            # Evaluating vsini
            self.v_grid = np.linspace(self.v_guess[0], self.v_guess[1], self.pts)
            self.S = []
            self.S = self.perf_new(self.v_grid, self.best_a, mode='vsini')
            self.S_v = self.S
            tck = UnivariateSpline(self.v_grid, self.S, k=4, s=0.0)#, s = 0.05)
            yfit = tck.__call__(self.v_grid)
            self.yfit_v = yfit
            self.intern_u = [False, False]
            # Locate the spline minimum: roots of the first derivative with
            # positive second derivative. Fall back to the grid minimum.
            try:
                z = tck.derivative().roots()
                tck2 = tck.derivative(n=2)
                z2 = tck2.__call__(z)
                i_s = np.where(z2 > 0.)[0]
                if i_s.size == 1:
                    self.best_v = z[i_s[0]]
                    self.intern_u[0] = True
                    self.it2[0] += 1
                    # Snap best_v to the nearest grid point index.
                    best_l = np.searchsorted(self.v_grid, self.best_v)
                    best_u = best_l + 1
                    try:
                        dif_l = self.best_v - self.v_grid[best_l]
                        dif_u = self.v_grid[best_u] - self.best_v
                        if dif_l <= dif_u:
                            self.best_v_ind = best_l
                        else:
                            self.best_v_ind = best_u
                    except IndexError:
                        self.best_v_ind = best_l
                else:
                    self.best_v_ind = np.where(self.S == min(self.S))[0][0]
                    self.best_v = self.v_grid[self.best_v_ind]
                del z, tck2, z2
            except ValueError:
                self.best_v_ind = np.where(self.S == min(self.S))[0][0]
                self.best_v = self.v_grid[self.best_v_ind]
            del tck, yfit
            # Evaluating abundance
            self.a_grid = np.linspace(self.a_guess[0], self.a_guess[1], self.pts)
            self.a_grid = self.a_grid[np.argsort(self.a_grid)]
            self.S = []
            self.S = self.perf_new(self.best_v, self.a_grid, mode='abundance')
            self.S_a = self.S
            tck = UnivariateSpline(self.a_grid, self.S, k=4, s=0.1)
            yfit = tck.__call__(self.a_grid)
            # NOTE(review): dead assignment — z is immediately rebound
            # inside the try block below.
            z = tck.derivative()
            self.yfit_a = yfit
            try:
                z = tck.derivative().roots()
                tck2 = tck.derivative(n=2)
                z2 = tck2.__call__(z)
                i_s = np.where(z2 > 0.)[0]
                if i_s.size == 1:
                    self.best_a = z[i_s[0]]
                    self.intern_u[1] = True
                    self.it2[1] += 1
                    best_l = np.searchsorted(self.a_grid, self.best_a)
                    best_u = best_l + 1
                    try:
                        dif_l = self.best_a - self.a_grid[best_l]
                        dif_u = self.a_grid[best_u] - self.best_a
                        if dif_l <= dif_u:
                            self.best_a_ind = best_l
                        else:
                            self.best_a_ind = best_u
                    except IndexError:
                        self.best_a_ind = best_l
                else:
                    self.best_a_ind = np.where(self.S == min(self.S))[0][0]
                    self.best_a = self.a_grid[self.best_a_ind]
                del z, tck2, z2
            except ValueError:
                self.best_a_ind = np.where(self.S == min(self.S))[0][0]
                self.best_a = self.a_grid[self.best_a_ind]
            del tck, yfit
            self.go_v = True
            self.go_a = True
            # Checking if the best values are too near the edges of the guess
            if self.best_v_ind == 0 or self.best_v_ind == (self.pts-1) or \
                    self.best_v_ind == 1 or self.best_v_ind == (self.pts-2):
                self.go_v = False
            elif self.best_a_ind == 0 or self.best_a_ind == (self.pts-1) or \
                    self.best_a_ind == 1 or self.best_a_ind == (self.pts-2):
                self.go_a = False
            # Calculating changes
            self.v_change = np.abs(self.best_v-np.mean(self.v_guess))
            self.a_change = np.abs(self.best_a-np.mean(self.a_guess))
            # NOTE(review): `~self.silent` is always truthy (bitwise NOT on
            # a bool), so the else branch below is unreachable.
            if ~self.silent:
                # Convergence tests (only after min_i iterations and when
                # the minimum is well inside the guess range).
                if (self.it > self.min_i) and self.go_v and self.go_a\
                        and (min(self.S) <= self.spec_sigma)\
                        and (np.abs(self.best_a - self.best_a_antes) <= 0.01)\
                        and (np.abs(self.best_v - self.best_v_antes) <= 0.01):
                    self.finish = True
                    break
                elif (self.it > self.min_i) and self.go_v and self.go_a and min(self.S) < 1e-4:
                    self.finish = True
                    break
                elif self.it > self.min_i and self.intern_u[0]\
                        and self.intern_u[1] and self.go_v and \
                        self.go_a and self.it2[0] >= 2 and self.it2[1] >= 2:
                    self.finish = True
                    break
            else:
                if self.it > self.min_i and self.intern_u[0]\
                        and self.intern_u[1]:
                    self.finish = True
                    break
            # Setting the new guess. If one of the best values are too near the
            # edges of the previous guess, it will not narrow its new guess range.
            self.v_width = self.v_guess[1]-self.v_guess[0]
            self.a_width = self.a_guess[1]-self.a_guess[0]
            if self.go_v:
                self.v_guess = np.array([self.best_v -
                                         old_div(self.v_width, self.pace[0]), self.best_v +
                                         old_div(self.v_width, self.pace[0])])
            else:
                self.v_guess = np.array([self.best_v-old_div(self.v_width, 2),
                                         self.best_v+old_div(self.v_width, 2)])
            if self.go_a:
                self.a_guess = np.array([self.best_a -
                                         old_div(self.a_width, self.pace[1]), self.best_a +
                                         old_div(self.a_width, self.pace[1])])
                if np.abs(self.a_guess[1] - self.a_guess[0]) < 0.05:
                    self.a_guess = np.array([self.best_a - 0.025,
                                             self.best_a + 0.025])
            # Checking if the v_guess contains vsini lower than v_low_limit.
            # If True, it will add a value to the array so that the lower limit
            # is equal to the v_low_limit
            if self.v_guess[0] < self.v_low_limit and ~self.silent:
                self.v_guess[0] += self.v_low_limit-self.v_guess[0]
            if self.a_guess[0] < -3.0 and ~self.silent:
                self.a_guess[0] = -3.0#+ (-3.0 - self.a_guess[0])
            self.it += 1
        # Finalizing the routine
        self.S = self.perf(np.array([self.best_v, self.best_a]))
        # Trigger bad fit warning
        if self.check > self.badfit_tol:
            self.badfit_status = True
        del MOOG
        return self
#******************************************************************************
#******************************************************************************
class Driver:
    """
    The MOOG driver object.
    Parameters
    ----------
    synth_interval : sequence
        The synthesis wavelength interval lower and upper limits in angstrons.
        Example: (6000, 6100).
    abunds : ``numpy.array``
        The atomic number (first column) and the abundance (second column) of
        the elements to be synthetisized.
        Example: numpy.array([[26, -0.05], [32, 0.01]])
    step: float, optional
        The wavelength step size of the synthesis. Default is 0.1.
    opac: float, optional
        Wavelength point to consider opacity contributions from neighboring
        transitions. Default is 2.0.
    wl_shift: float, optional
        Wavelength shift to be applied to the observed spectrum. Default is 0.
    v_shift: float, optional
        Doppler velocity shift to be applied to the observed spectrum. Default
        is 0.
    y_shift_add: float, optional
        Additive shift to be applied to the observed spectrum. Default is 0.
    y_shift_mult: float, optional
        Multiplicative factor to be applied to the observed spectrum. Default
        is 1.0 (no modification).
    gauss: float, optional
        Value of the 1 sigma dispersion of the Gaussian smoothing to be applied
        to the synthetic spectrum. Default is 0.
    lorentz: float, optional
        Default is 0.
    eps: float, optional
        Limb darkening coefficient. Default is 0.6.
    macro_v: float, optional
        Macroturbulence velocity of the star. Default is 0.
    vsini: float, optional
        The projected rotational velocity of the star. Default is 0.
    linelist_in: str, optional
        Name of the line list input file. Default is 'lines.dat'.
    observed_in: str, optional
        Name of the input file containing the observed spectrum. Default is
        'spectrum.dat'.
    atmosphere: int, optional
        Default is 1.
    molecules: int, optional
        Default is 1.
    trudamp: int, optional
        Default is 1.
    lines: int, optional
        Default is 1.
    flux: int, optional
        Default is 0.
    damping: int, optional
        Default is 0.
    star_name: str, optional
        Self-explanatory. Default is 'Unnamed star'.
    """
    # NOTE(review): the docstring documents a `linelist_in` parameter that
    # __init__ does not accept, and `step` actually defaults to 0.01, not
    # 0.1 — keep docstring and signature in sync.

    def __init__(self, synth_interval, abunds, obs_wl, obs_flux, step=0.01, opac=2.0,
                 wl_shift=0.0, v_shift=0.0, y_shift_add=0.0, y_shift_mult=1.0,
                 gauss=0.0, lorentz=0.0, eps=0.6, macro_v=0.0, vsini=0.0,
                 observed_in='spectrum.dat',
                 atmosphere=1, molecules=1, trudamp=1, lines=1, flux=0,
                 damping=0, star_name='Unnamed star', plot=True, savefig=False,
                 line_number=0):
        self.name = star_name
        self.plot_switch = plot
        self.savefig = savefig
        # Output files
        self.standard_out = './output/%s_l.out' % self.name
        self.summary_out = './output/%s_li.out' % self.name
        self.smoothed_out = './output/%s_s.out' % self.name
        self.smoothed_out_new = './output/%s_sn.out' % self.name
        # Input files
        self.model_in = './atm_models/%s_v.atm' % self.name
        self.lines_in = './MOOG_linelist/lines.%s_v.txt' % self.name
        self.observed_in = './Spectra/%s_%d.dat' % (self.name, line_number)
        # Output files
        # NOTE(review): the *_moog attributes duplicate the paths above;
        # create_batch() uses only the *_moog set.
        self.standard_out_moog = './output/%s_l.out' % self.name
        self.summary_out_moog = './output/%s_li.out' % self.name
        self.smoothed_out_moog = './output/%s_s.out' % self.name
        self.smoothed_out_new_moog = './output/%s_sn.out' % self.name
        # Input files
        self.model_in_moog = './atm_models/%s_v.atm' % self.name
        self.lines_in_moog = './MOOG_linelist/lines.%s_v.txt' % self.name
        self.observed_in_moog = './Spectra/%s_%d.dat' % (self.name, line_number)
        # Atomic numbers of the lines in the MOOG line list (file read at
        # construction time).
        self.lines_ab = np.loadtxt(self.lines_in, usecols=(0,), skiprows=1)
        # Synthesis parameters
        self.syn_start = synth_interval[0]
        self.syn_end = synth_interval[1]
        self.wl_start = synth_interval[0]
        self.wl_end = synth_interval[1]
        self.step = step
        self.opac = opac
        self.wl_shift = wl_shift
        if int(v_shift) != 0:
            raise NotImplementedError('Doppler shift in the observed spectrum'
                                      'is not implemented yet.')
        self.v_shift = v_shift
        self.y_shift_add = y_shift_add
        self.y_shift_mult = y_shift_mult
        self.gauss = gauss
        self.lorentz = lorentz
        self.dark = eps
        self.macro_v = macro_v
        self.vsini = vsini
        self.N, self.n_cols = np.shape(abunds)
        assert(self.n_cols == 2), 'Number of columns in `abunds` must be 2.'
        if self.N == 1:
            self.Z = abunds[0][0]
            self.abunds = abunds[0][1]
        elif self.N > 1:
            self.Z = abunds[:, 0]
            self.abunds = abunds[:, 1]
        # MOOG synth options
        self.atm = atmosphere
        self.mol = molecules
        self.tru = trudamp
        self.lin = lines
        self.flu = flux
        self.dam = damping
        # Reading the observed spectrum
        # NOTE(review): the observed spectrum is taken from the obs_wl /
        # obs_flux arguments, not from the observed_in file; when
        # observed_in is None, self.obs_wl is never set and the
        # np.array([...]) below raises AttributeError — verify intent.
        if isinstance(observed_in, str):
            self.obs_wl = obs_wl + wl_shift
            self.obs_flux = obs_flux * y_shift_mult + y_shift_add
        elif observed_in is None:
            self.observed_in = observed_in
        else:
            raise TypeError('observed_in must be ``str`` or ``None``.')
        self.data = np.array([self.obs_wl, self.obs_flux]).T
        self.it = 0
        self.c = 2.998E5  # speed of light, km/s
        self.model_ab = {}
        self.model_vsini = {}
        self.model = []
        self.index = 0
        self.start_index = 0
        self.end_index = 0

    def create_batch(self):
        """
        Writes the MOOG driver file batch.par
        """
        with open('./MOOGFEB2017/%s_synth.par' % self.name, 'w') as f:
            f.truncate()
            f.write("synth\n")
            f.write("standard_out '%s'\n" % self.standard_out_moog)
            f.write("summary_out '%s'\n" % self.summary_out_moog)
            f.write("smoothed_out '%s'\n" % self.smoothed_out_moog)
            f.write("model_in '%s'\n" % self.model_in_moog)
            f.write("lines_in '%s'\n" % self.lines_in_moog)
            f.write("observed_in '%s'\n" % self.observed_in_moog)
            f.write("atmosphere %i\n" % self.atm)
            f.write("molecules %i\n" % self.mol)
            f.write("trudamp %i\n" % self.tru)
            f.write("lines %i\n" % self.lin)
            f.write("flux/int %i\n" % self.flu)
            f.write("damping %i\n" % self.dam)
            f.write("freeform 0\n")
            f.write("plot 3\n")
            f.write("abundances %i 1\n" % self.N)
            # One abundance line per element (vector) or a single line.
            if self.N > 1:
                for k in range(self.N):
                    f.write(" %i %f\n" % (self.Z[k], self.abunds[k]))
            else:
                f.write(" %i %f\n" % (self.Z, self.abunds))
            f.write("isotopes 0 1\n")
            f.write("synlimits\n")
            f.write(" %.2f %.2f %.2f %.1f\n" % (self.syn_start, self.syn_end,
                                                self.step, self.opac))
            f.write("obspectrum 5\n")
            f.write("plotpars 1\n")
            f.write(" %.2f %.2f 0.05 1.05\n" % (self.wl_start, self.wl_end))
            f.write(" %.4f %.4f %.3f %.3f\n" % (self.v_shift, self.wl_shift,
                                                self.y_shift_add,
                                                self.y_shift_mult))
            f.write(" gm %.3f 0.0 %.1f %.2f %.1f" % (self.gauss,
                                                     self.dark,
                                                     self.macro_v,
                                                     self.lorentz))
        del f

    def change_vsini(self, grid_v):
        """
        Run MOOG once and build self.model_vsini, a dict mapping str(vsini)
        to the synthetic spectrum rotationally broadened at each vsini in
        ``grid_v``.  Non-positive vsini entries get a NaN flux column.
        """
        self.create_batch()
        os.system('MOOGSILENT > temp.log 2>&1 << EOF\nMOOGFEB2017/%s_synth.par\n\nEOF' % self.name)
        # NOTE(review): bare except silently converts any failure (missing
        # file, parse error, even KeyboardInterrupt) into a zero spectrum.
        try:
            model = np.loadtxt(self.smoothed_out, skiprows=2)
            synth_wl = model[:, 0]
            synth_flux = model[:, 1]
        except:
            model = []
            synth_wl = np.arange(self.syn_start, self.syn_end, self.step)
            synth_flux = np.zeros(synth_wl.size)
        # Center the line in the array so rotBroad's convolution is symmetric.
        inonan = np.where(np.isfinite(synth_flux))[0]
        if len(np.unique(synth_flux[inonan])) > 1:
            synth_wl, synth_flux, self.obs_wl, self.obs_flux = \
                self.smart_cut(synth_wl, synth_flux, self.obs_wl, self.obs_flux)
        self.model_vsini = {}
        for vsini in grid_v:
            self.vsini = vsini
            if self.vsini > 0.0:
                conv_flux = pyasl.rotBroad(synth_wl, synth_flux, self.dark, self.vsini)
                # Renormalize the broadened spectrum to its maximum.
                self.model_vsini[str(vsini)] = np.array([synth_wl,
                                                         conv_flux/max(conv_flux)]).T
                del conv_flux
            else:
                self.model_vsini[str(vsini)] = np.array([synth_wl,
                                                         np.nan*np.ones(synth_wl.size)]).T
        del model, synth_wl, synth_flux
        return self

    def change_ab(self, a):
        """
        Set the abundance to ``a``, run MOOG, and store the broadened
        synthetic spectrum in self.model_ab[str(a)].  Requires
        self.vsini > 0; otherwise a NaN flux column is stored.
        """
        self.abunds = a
        self.create_batch()
        os.system('MOOGSILENT > temp.log 2>&1 << EOF\nMOOGFEB2017/%s_synth.par\n\nEOF' % self.name)
        # NOTE(review): bare except — same silent-failure concern as in
        # change_vsini().
        try:
            model = np.loadtxt(self.smoothed_out, skiprows=2)
            synth_wl = model[:, 0]
            synth_flux = model[:, 1]
        except:
            model = []
            synth_wl = np.arange(self.syn_start, self.syn_end, self.step)
            synth_flux = np.zeros(synth_wl.size)
        if self.vsini > 0.0:
            inonan = np.where(np.isfinite(synth_flux))[0]
            if len(np.unique(synth_flux[inonan])) > 1:
                synth_wl, synth_flux, self.obs_wl, self.obs_flux = \
                    self.smart_cut(synth_wl, synth_flux, self.obs_wl, self.obs_flux)
            conv_flux = pyasl.rotBroad(synth_wl, synth_flux, self.dark, self.vsini)
            self.model_ab[str(a)] = np.array([synth_wl, old_div(conv_flux, max(conv_flux))]).T
            del conv_flux
        else:
            self.model_ab[str(a)] = np.array([synth_wl, np.nan * np.ones(synth_wl.size)]).T
        del synth_wl, synth_flux, model
        return self

    def run(self):
        """
        Used to run MOOG silent.

        Stores the (optionally rotationally broadened and renormalized)
        synthetic spectrum in self.model as an (n, 2) array.
        """
        self.create_batch()
        os.system('MOOGSILENT > temp.log 2>&1 << EOF\nMOOGFEB2017/%s_synth.par\n\nEOF' % self.name)
        # NOTE(review): bare except — same silent-failure concern as above.
        try:
            self.model = np.loadtxt(self.smoothed_out, skiprows=2)
            synth_wl = self.model[:, 0]
            synth_flux = self.model[:, 1]
        except:
            self.model = []
            synth_wl = np.arange(self.syn_start, self.syn_end, self.step)
            synth_flux = np.zeros(synth_wl.size)
        if self.vsini > 0.0:
            inonan = np.where(np.isfinite(synth_flux))[0]
            if len(np.unique(synth_flux[inonan])) > 1:
                synth_wl, synth_flux, self.obs_wl, self.obs_flux = \
                    self.smart_cut(synth_wl, synth_flux, self.obs_wl, self.obs_flux)
            conv_flux = pyasl.rotBroad(synth_wl, synth_flux, self.dark, self.vsini)
            self.model = np.array([synth_wl, old_div(conv_flux, max(conv_flux))]).T
            del conv_flux
        del synth_wl, synth_flux
        return self

    def rot_prof(self, vz):
        """
        This function creates a rotational profile based on Gray (2005).
        Parameters
        ----------
        vz : ``numpy.array``
            The Doppler velocities from the spectral line center.
        Returns
        -------
        profile : ``numpy.array``
            The rotational profile.
        """
        n = len(vz)
        profile = np.zeros(n, float)
        # The profile is non-zero only inside |vz| < vsini.
        m = np.abs(vz) < self.vsini
        profile[m] = old_div((2.*(1.-self.dark)*(1.-(old_div(vz[m], self.vsini)) ** 2.)
                              ** 0.5 + 0.5 * np.pi * self.dark *
                              (1. - (old_div(vz[m], self.vsini)) ** 2.)),
                             (np.pi * self.vsini * (1. - self.dark / 3.)))
        del m
        return profile

    @staticmethod
    def smart_cut(wl, flux, obs_wl, obs_flux):
        """
        smart_cut() is used to prepare the synthetic spectrum for a convolution
        with the rotational profile.

        Trims all four arrays to an odd-length window centered on the flux
        minimum (the line core), which rotBroad's convolution expects.
        NOTE(review): obs_wl/obs_flux are sliced with the *synthetic*
        spectrum's indices — this assumes both are sampled on the same grid;
        confirm against callers.
        """
        ind0 = np.where(flux == min(flux))[0][0]
        n = len(wl)
        if ind0 < old_div((n - 1), 2):
            # Minimum in the left half: keep a window of width ~2*ind0.
            if (ind0 + 1) % 2 == 0:
                wl = wl[1:2 * ind0]
                flux = flux[1:2 * ind0]
                obs_flux = obs_flux[1:2 * ind0]
                obs_wl = obs_wl[1:2 * ind0]
            else:
                wl = wl[0:2 * ind0 + 1]
                flux = flux[0:2 * ind0 + 1]
                obs_flux = obs_flux[0:2 * ind0 + 1]
                obs_wl = obs_wl[0:2 * ind0 + 1]
        elif ind0 > old_div((n - 1), 2):
            # Minimum in the right half: drop points from the left edge.
            if (ind0 + 1) % 2 == 0:
                wl = wl[2*(ind0 - old_div((n - 1), 2)) + 1:-1]
                flux = flux[2 * (ind0 - old_div((n - 1), 2)) + 1:-1]
                obs_flux = obs_flux[2 * (ind0 - old_div((n - 1), 2)) + 1:-1]
                obs_wl = obs_wl[2 * (ind0 - old_div((n - 1), 2)) + 1:-1]
            else:
                wl = wl[2 * (ind0 - old_div((n - 1), 2)):]
                flux = flux[2 * (ind0 - old_div((n - 1), 2)):]
                obs_flux = obs_flux[2 * (ind0 - old_div((n - 1), 2)):]
                obs_wl = obs_wl[2 * (ind0 - old_div((n - 1), 2)):]
        return wl, flux, obs_wl, obs_flux
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
class arr_manage:
    """Small helper for searching and slicing numpy data arrays.

    The most recent results are cached on the instance (``index``,
    ``start_index``, ``end_index``) in addition to being returned.
    """

    def __init__(self):
        # Cached results of the latest search/slice operations.
        self.index = 0
        self.start_index = 0
        self.end_index = 0

    def find_index(self, target, array):
        """Return the left insertion index of ``target`` in the sorted ``array``."""
        self.index = np.searchsorted(array, target, side='left')
        return self.index

    def x_set_limits(self, start, end, data2d):
        """Return the rows of ``data2d`` whose first column lies in [start, end).

        ``start`` and ``end`` need not coincide with values actually present
        in the array; the nearest insertion points are used.
        """
        first_column = data2d[:, 0]
        self.start_index = np.searchsorted(first_column, start, side='left')
        self.end_index = np.searchsorted(first_column, end, side='left')
        return data2d[self.start_index:self.end_index]
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
def calc_broadening(starname, Teff, met, logg, micro, ab_ni,
                    err_T, err_logg, err_met, err_ni, snr, alias):
    """Combine per-line vmac and vsini estimates into final stellar values.

    Returns (vsini_final, err_vsini_final, vmac_final, err_vmac_final).
    """
    # Macroturbulence (one value per line) from the Teff/logg calibration.
    vmac, err_vmac = calc_vmac((Teff, err_T), (logg, err_logg))
    #vmac, err_vmac = np.ones(len(vmac))*0.1, np.ones(len(vmac))*0.1
    vsini, err_vsini, weight_vsini = calc_vsini(starname, Teff, met, logg, micro, vmac,
                                                ab_ni, err_met, err_ni, snr, alias)
    if (vsini.size == 0) or np.all(vsini == 0.0) or np.all(err_vsini == 0.0):
        # No usable vsini measurement: report only the macroturbulence,
        # using the lines with valid (non-zero) vmac values.
        i = np.where((vmac != 0.0) & (err_vmac != 0.0))[0]
        vmac_final = np.median(vmac[i])
        err_vmac_final = old_div(np.median(err_vmac[i]), np.sqrt(float(len(i))))
        vsini_final = 0.0
        err_vsini_final = 0.0
    else:
        # Keep only lines where every quantity was successfully measured.
        i = np.where((vmac != 0.0) & (vsini != 0.0) & (err_vmac != 0.0) & (err_vsini != 0.0))[0]
        #vmac_final = np.median(vmac[i])
        #err_vmac_final = old_div(np.median(err_vmac[i]), np.sqrt(float(len(i))))
        # Median with uncertainty propagation via the `uncertainties` package.
        vm = np.median(unumpy.uarray(vmac[i], err_vmac[i]))
        vmac_final = vm.n
        err_vmac_final = vm.s
        #vsini_final = np.average(vsini[i], weights=weight_vsini[i])
        #err_vsini_final = np.sqrt(1./(1./(np.sum(err_vsini[i]**2.) + err_vmac_final**2.)))
        # The vmac uncertainty is propagated into the vsini error through
        # the extra zero-mean ufloat term.
        v = np.median(unumpy.uarray(vsini[i], err_vsini[i])) + ufloat(0.0, err_vmac_final)
        vsini_final = v.n
        err_vsini_final = v.s
    del i, vmac, err_vmac, vsini, err_vsini
    # Guard against NaN propagation from degenerate inputs.
    if np.isnan(err_vsini_final):
        err_vsini_final = 0.0
    return vsini_final, err_vsini_final, vmac_final, err_vmac_final
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
def calc_vmac(Teff, logg):
    """Estimate the macroturbulence velocity and its uncertainty.

    Parameters
    ----------
    Teff : tuple
        (effective temperature, uncertainty) in K.
    logg : tuple
        (surface gravity, uncertainty) in dex.

    Returns
    -------
    vmac, err_vmac : ``numpy.array``
        Five-element arrays (one entry per analysed line) with the
        macroturbulence velocity and its error.
    """
    teff_val, teff_err = Teff[0], Teff[1]
    logg_val, logg_err = logg[0], logg[1]
    # Solar macroturbulence anchor values, one per line.
    solar_vmac = np.array([3.0, 3.2, 3.1, 3.6, 2.9])
    # Polynomial Teff/logg calibration around the solar anchors.
    vmac = solar_vmac - 0.00707*teff_val + 9.2422*10**(-7.)*teff_val**2. + \
        10.0 - 1.81*(logg_val - 4.44) - 0.05
    # Error propagation of the calibration's Teff and logg terms.
    err_vmac = np.ones(5)*np.sqrt(0.1**2. + (9.2422*10**(-7.)*2*teff_val-0.00707)**2.*teff_err**2. +
                                  1.81**2.*logg_err**2. + (logg_val - 4.44)**2.*0.26**2. + 0.03**2.)
    if np.mean(vmac) < 0.5:
        # Unphysically small result: fall back to the Brewer et al. (2016)
        # exponential calibration, propagating errors with `uncertainties`.
        # (Alternative calibrations from Melendez et al. 2012 were tried
        # here historically.)
        teff = ufloat(teff_val, teff_err)
        log_g = ufloat(logg_val, logg_err)
        if log_g.n >= 4.0:
            vmac = 2.202*umath.exp(0.0019*(teff - 5777.)) + 1.30
        elif (4.0 > log_g.n >= 3.0):
            vmac = 1.166*umath.exp(0.0028*(teff - 5777.)) + 3.30
        else:
            vmac = 4.0 + ufloat(0.0, 0.25)
        logging.info(vmac.n + np.zeros(5))
        logging.info(vmac.s + np.zeros(5))
        # Broadcast the single ufloat to the expected 5-element arrays.
        return vmac.n + np.zeros(5), vmac.s + np.zeros(5)
    return vmac, err_vmac
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
def calc_vsini(starname, Teff, met, logg, micro, v_macro, ab_ni,
               err_met, err_ni, snr=100., alias='test'):
    """
    Measure the projected rotational velocity (vsini) of a star, line by line.

    For each of five pre-selected Fe I / Ni I lines, the observed spectrum
    ('./Spectra/<starname>_res.fits') is continuum-normalized, shifted to the
    laboratory wavelength, and compared against synthetic profiles produced
    through the ``Vsini``/MOOG machinery over a grid of vsini values.  The
    per-line uncertainty is estimated by repeating the fit after perturbing
    the flux with Gaussian noise consistent with ``snr``.

    Parameters
    ----------
    starname : identifier used to locate the spectrum; the instrument name is
               taken from the part after the first '_'.
    Teff, met, logg, micro : atmospheric parameters passed to ``interpol`` to
               build the model atmosphere.
    v_macro : per-line macroturbulence values (array of 5, see ``calc_vmac``).
    ab_ni : Ni abundance (not used directly in this function body).
    err_met, err_ni : abundance uncertainties forwarded to ``calc_ab``.
    snr : signal-to-noise ratio of the spectrum (default 100).
    alias : short name used for all temporary files created/removed here.

    Returns
    -------
    (vsini_lines, err_vsini_lines, weight_lines) : arrays of length 5,
    left as zero for lines that could not be fitted.
    """
    def f_polinomial(x, a, b):
        # Straight line used as the local continuum model.
        return a*x + b
    def f_gauss(x, a, b, c):
        # Gaussian profile centred at b with width c.
        return a*np.exp(old_div(-(x - b)**2., (2.*c**2.)))
    def f_total(x, a, b, c, d, e):
        # Gaussian absorption line on top of a linear continuum.
        return f_gauss(x, a, b, c) + f_polinomial(x, d, e)
    def continuum_det2(x, y, snr): # Substracts the continuum and normalizes
        # Iteratively fit a quadratic to the points lying above the current
        # continuum estimate (scaled by rejt), i.e. sigma-clip from below.
        rejt = 1.-1./snr
        p = np.poly1d(np.polyfit(x, y, 2))
        ynorm = p(x)
        for _ in range(3):
            dif = np.hstack((np.abs((y[:-1]-y[1:])/y[:-1]), [1.0]))
            i = np.where(((y-ynorm*rejt) > 0) & (dif < 0.1))[0]
            vecx = x[i]
            vecy = y[i]
            p = np.poly1d(np.polyfit(vecx, vecy, 2))
            ynorm = p(x)
        yfit = p(x)
        del p, vecx, vecy
        if np.any(yfit <= 0.0):
            # Degenerate continuum: check whether the line region actually
            # contains signal; if not, zero the fit so y/yfit flags the line.
            i_nonzero = np.where(y > (min(y) + 0.05*(max(y)-min(y))))[0]
            xmed = min(x) + (max(x)-min(x))*0.5
            i_in_line = np.where((x >= (xmed-1.0)) & (x <= (xmed+1.0)))[0]
            if np.any(np.isin(i_in_line, i_nonzero)):
                logging.info('line not visible')
                yfit = np.zeros(len(y))
            del i_nonzero, i_in_line
        return np.array(y/yfit)
    # Estimate error in vsini by seeing how the vsini estimation changes
    # when changing the flux data by its noise
    def best_S(vrot, S):
        # Locate the vsini that minimizes the merit statistic S by fitting
        # a quartic smoothing spline over the vsini grid and taking the
        # root of its derivative that is a true minimum (2nd deriv > 0).
        tck = UnivariateSpline(vrot.v_grid, S, k=4, s=0.0)#, s = 0.05)
        new_grid = np.linspace(vrot.v_grid[0], vrot.v_grid[-1], 100)
        intern_u = [False, False]
        Sfit = tck(new_grid)
        try:
            z = tck.derivative().roots()
            tck2 = tck.derivative(n=2)
            z2 = tck2.__call__(z)
            i_s = np.where(z2 > 0.)[0]
            if i_s.size == 1:
                # Exactly one interior minimum: snap to the nearest grid node.
                best_v = z[i_s[0]]
                intern_u[0] = True
                #self.it2[0] += 1
                best_l = np.searchsorted(vrot.v_grid, best_v)
                best_u = best_l + 1
                try:
                    dif_l = best_v - vrot.v_grid[best_l]
                    dif_u = vrot.v_grid[best_u] - best_v
                    if dif_l <= dif_u:
                        best_v_ind = best_l
                    else:
                        best_v_ind = best_u
                except IndexError:
                    best_v_ind = best_l
                Smin = S[best_v_ind]
            else:
                # Zero or several candidate minima: fall back to the minimum
                # of the spline evaluated on the fine grid.
                #best_v_ind = np.where(S == min(S))[0][0]
                #best_v = vrot.v_grid[best_v_ind]
                Sfit = tck(new_grid)
                best_v_ind = np.argmin(Sfit)
                best_v = new_grid[best_v_ind]
                Smin = Sfit[best_v_ind]
            del z, tck2, z2
        except ValueError:
            # Root finding failed; use the fine-grid minimum instead.
            #best_v_ind = np.where(S == min(S))[0][0]
            #best_v = vrot.v_grid[best_v_ind]
            best_v_ind = np.argmin(Sfit)
            best_v = new_grid[best_v_ind]
            Smin = Sfit[best_v_ind]
        del tck, Sfit, new_grid
        return best_v, Smin
    def get_S(vrot, ynoise):
        # Weighted residual statistic between the (noisy) data and each
        # synthetic profile on the vsini grid.  First clamp the comparison
        # window [ci0, ci1) so it fits inside the data array.
        if 2.*vrot.radius > len(ynoise[:, 1]):
            vrot.radius = int(np.floor(old_div(len(ynoise[:, 0]), 2)) - 1)
            vrot.ci0 = vrot.center_index - vrot.radius
            vrot.ci1 = vrot.center_index + vrot.radius+1
        if vrot.ci1 > len(ynoise[:, 0]):
            resto = int(np.ceil((vrot.ci1 - len(ynoise[:, 0]))))
            vrot.radius -= resto
            vrot.ci0 = vrot.center_index - vrot.radius
            vrot.ci1 = vrot.center_index + vrot.radius+1
        if vrot.ci0 < 0:
            vrot.radius -= (vrot.radius - vrot.center_index)
            vrot.ci0 = vrot.center_index - vrot.radius
            vrot.ci1 = vrot.center_index + vrot.radius+1
        v = vrot.v_grid
        a = vrot.best_a
        S = np.inf * np.ones(v.size)
        vrot.MOOG.abunds = a
        #self.MOOG = self.MOOG.change_vsini(v)
        for k, vsini in enumerate(v):
            model_v = vrot.MOOG.model_vsini[str(vsini)]
            # NOTE(review): `~` on the Python bool returned by all() is
            # bitwise NOT (~True == -2, ~False == -1, both truthy), so this
            # condition is always true; `not all(...)` was likely intended.
            if ~all(np.isnan(model_v.T[1])):
                model_interp = np.interp(ynoise[vrot.ci0:vrot.ci1, 0],\
                                         model_v.T[0], model_v.T[1])
                # Weights: line core (7 central pixels) vs. blue/red wings.
                w = np.zeros(2 * vrot.radius + 1, float)
                if vrot.ci1 > len(ynoise[:, 0]):
                    w = np.zeros(2 * vrot.radius, float)
                w[:vrot.radius-3] = vrot.bwing_w
                w[vrot.radius+4:] = vrot.rwing_w
                w[vrot.radius-3:vrot.radius+4] = vrot.center_w
                S[k] = np.sum(w * (ynoise[vrot.ci0:vrot.ci1, 1] - \
                              model_interp)**2.) / np.sum(w)
                del model_interp, w
            del model_v
        return S
    def noise_estimation(vrot, snr, N=1000):
        # Monte-Carlo error estimate: perturb the flux with Gaussian noise
        # of amplitude flux/snr, refit vsini N times, return the samples.
        vsini = np.nan*np.ones(N)
        ydata = vrot.data_target
        minSnoise = np.nan*np.ones(N)
        noise_array = np.zeros((N,len(ydata)))
        for i in range(len(ydata)):
            noise_array.T[i] = np.random.normal(0.0, np.abs(ydata[:,1][i]/snr), N)
        for i in range(N):
            ynoise = np.copy(ydata)
            ynoise[:,1] = ydata[:,1]+noise_array[i]
            Snoise = get_S(vrot, ynoise)
            vsini[i], minSnoise[i] = best_S(vrot, Snoise)
        return vsini, minSnoise
    # Set lines
    # Creates dic with characteristics of lines
    lines1 = {'name' : 'FeI', 'wave' : 6027.05, 'Z' : 26, 'EP' : 4.076, 'loggf' : -1.09}
    lines2 = {'name' : 'FeI', 'wave' : 6151.62, 'Z' : 26, 'EP' : 2.176, 'loggf' : -3.30}
    lines3 = {'name' : 'FeI', 'wave' : 6165.36, 'Z' : 26, 'EP' : 4.143, 'loggf' : -1.46}
    lines4 = {'name' : 'FeI', 'wave' : 6705.10, 'Z' : 26, 'EP' : 4.607, 'loggf' : -0.98}
    lines5 = {'name' : 'NiI', 'wave' : 6767.77, 'Z' : 28, 'EP' : 1.826, 'loggf' : -2.17}
    lines_o = (lines1, lines2, lines3, lines4, lines5)
    # Create model atmosphere
    interpol(starname, Teff, logg, met, micro, alias+'_v')
    inst = starname[starname.index('_')+1:]
    lines_ab, dev_ab = calc_ab(starname, err_met, err_ni, alias)
    line_file = './MOOG_linelist/lines.%s_v.txt' % alias
    # Read the spectrum; fall back to raw FITS access when pyasl fails.
    # NOTE(review): bare `except:` also catches KeyboardInterrupt/SystemExit.
    try:
        x, data = pyasl.read1dFitsSpec('./Spectra/%s_res.fits' % starname)
    except:
        hdu = fits.open('./Spectra/%s_res.fits' % starname)
        d = hdu[0].data
        x = d[0]
        data = d[1]
        hdu.close()
        del hdu, d
    new_data = np.array([x, data]).T
    # Instrumental resolution from the FITS header, when present.
    resolution = None
    try:
        resolution = fits.getval('./Spectra/%s_res.fits' % starname, 'R', 0)
    except (IndexError, KeyError):
        pass
    # For each line create vsini object
    vsini_lines = np.zeros(5)
    err_vsini_lines = np.zeros(5)
    weight_lines = np.zeros(5)
    ab_keys = list(map(float, list(lines_ab.keys())))
    data_lines = {}
    # Keep only the candidate lines actually present in the MOOG line list.
    available_lines = np.loadtxt(line_file, skiprows=1, usecols=(0))
    lines = []
    for l in lines_o:
        if l['wave'] in available_lines:
            lines.append(l)
    for l, li in enumerate(lines):
        data_o = new_data[:]
        w = li['wave']
        if w in ab_keys:
            info_line = {}
            info_line['vmac'] = v_macro[l]
            kwargs = {'star_name' : alias}
            # +-3 A window around the line for continuum fitting.
            j = (x > (w - 3.0)) & (x < (w + 3.0))
            #j = (x > (w - 2.0)) & (x < (w + 2.0))
            x_l = x[j]
            y_l = data[j]
            if x_l.size == 0 or y_l.size == 0:
                continue
            # +-0.5 A core window sets the performance radius (in pixels).
            k = (x > (w - 0.5)) & (x < (w+0.5))
            #k = (x > (w - 1.0)) & (x < (w+1.0))
            i_r = int(np.floor(old_div(len(x[k]), 2)))
            kwargs['perf_radius'] = i_r
            # Normalize the local continuum and recentre the line on its
            # laboratory wavelength via a Gaussian+linear fit.
            try:
                new_y_l = continuum_det2(x_l, y_l, snr)
                popt, _ = curve_fit(f_total, x_l, new_y_l,
                                    p0=(old_div(-max(y_l), min(y_l)), w, 0.2, 0.0, max(y_l)))
                if popt[1] > (w + 0.3) or popt[1] < (w - 0.3):
                    x_shift = 0.0
                else:
                    x_shift = w - popt[1]
                new_x_l = x_l + x_shift
                del popt
            except (RuntimeError, TypeError, ValueError):
                # Continuum/centroid fit failed: normalize with the combined
                # Gaussian+linear model fitted to the raw flux instead.
                try:
                    popt, _ = curve_fit(f_total, x[j], data[j],
                                        p0=(old_div(-max(data[k]), min(data[k])),
                                            w, 0.2, 0.0, max(data[j])))
                    y_polinomial = f_polinomial(x_l, *popt[3:])
                    if popt[1] > (w + 0.3) or popt[1] < (w - 0.3):
                        x_shift = 0.0
                    else:
                        x_shift = w - popt[1]
                    new_x_l = x_l + x_shift
                    new_y_l1 = old_div(y_l, f_total(x_l, *popt))
                    popt2, _ = curve_fit(f_polinomial, new_x_l, new_y_l1, p0=(0.0, 1.0))
                    y_polinomial2 = f_polinomial(new_x_l, *popt2)
                    new_y_l = old_div(y_l, y_polinomial/y_polinomial2)
                    del popt2, y_polinomial, y_polinomial2, new_y_l1, popt
                except RuntimeError:
                    # Give up on normalization; use the raw data but relax
                    # the bad-fit tolerance for the Vsini solver.
                    new_x_l = x_l[:]
                    new_y_l = y_l[:]
                    kwargs['badfit_tol'] = 50
            ascii.write([new_x_l, new_y_l], './Spectra/%s_%d.dat' % (alias, l),\
                        format='fixed_width_no_header', overwrite=True, delimiter='\t')
            SN = snr
            del j, x_l, y_l, new_x_l, new_y_l
            # Instrumental Gaussian broadening: wavelength / resolution,
            # with per-instrument defaults when no header value is present.
            if resolution is None:
                if inst == 'harps':
                    gauss = w/115000.
                elif inst in ['feros', 'feros_o']:
                    gauss = w/48000.
                elif inst == 'uves':
                    gauss = w/110000.
                elif inst in ['hires', 'HIRES']:
                    gauss = w/67000.
                elif inst == 'coralie':
                    gauss = w/60000.
                elif inst == 'psf':
                    gauss = w/38000.
                else:
                    # Unknown instrument: infer R from the pixel sampling,
                    # capped at 150000.
                    x_ = ascii.read('./Spectra/%s_%d.dat' % (alias, l))['col1']
                    R = min(np.mean(x_)/np.mean(x_[1:]-x_[:-1]), 150000.)
                    gauss = w/R
                    #print(R)
                    del x_, R
            else:
                gauss = w/float(resolution)
            spec_window = np.array([w-1.0, w+1.0])
            # vmac must be strictly positive for the synthesis.
            if v_macro[l] > 0.0:
                vmacro = v_macro[l]
            else:
                vmacro = 0.1
            info_line['vmac'] = vmacro
            vrot = Vsini(spec_window, gauss, vmacro, line_file, l, SN, **kwargs)
            logging.info('Working on line %.3f, line abundance is %.3f, a_guess is [%.3f, %.3f]', \
                         w, lines_ab['%.3f' % w], \
                         (lines_ab['%.3f' % w]) - 2.5*max(0.1, dev_ab['%.3f' % w]),\
                         (lines_ab['%.3f' % w]) + 2.5*max(0.1, dev_ab['%.3f' % w]))
            # Abundance bracket: line abundance +- 2.5 * max(0.1, its error).
            kwargs2 = {'a_guess' : np.array([(lines_ab['%.3f' % w])\
                                             -2.5*max(0.1, dev_ab['%.3f' % w]),
                                             (lines_ab['%.3f' % w])\
                                             + 2.5*max(0.1, dev_ab['%.3f' % w])]),\
                       'v_guess' : np.array([0.1, 25.]),\
                       'save' : True,\
                       'N' : 30,\
                       'v_low_limit' : 0.1,\
                       'max_i' : 30}
            vrot = vrot.find(**kwargs2)
            info_line['data'] = vrot.MOOG.data
            info_line['model'] = vrot.MOOG.model
            info_line['vsini'] = vrot.best_v
            info_line['v_grid'] = vrot.v_grid
            info_line['S_v'] = vrot.S_v
            info_line['a_grid'] = vrot.a_grid
            info_line['S_a'] = vrot.S_a
            info_line['abundance'] = vrot.best_a
            info_line['yfit_v'] = vrot.yfit_v
            info_line['yfit_a'] = vrot.yfit_a
            #Compare fit with a straight line
            ilm = np.where(np.abs(vrot.MOOG.model[:, 0]-w) <= 1.0)[0]
            il = np.where(np.abs(vrot.MOOG.data[:, 0]-w) <= 1.0)[0]
            model_interp = UnivariateSpline(vrot.MOOG.model[:, 0][ilm],
                                            vrot.MOOG.model[:, 1][ilm],
                                            s=0, k=5)(vrot.MOOG.data[:, 0][il])
            S_model = np.sum((vrot.MOOG.data[:, 1][il]-model_interp)**2.)
            S_line = np.sum((vrot.MOOG.data[:, 1][il]-1.0)**2.)
            if S_line < S_model:
                vrot.badfit_status = True
            del model_interp, S_model, S_line
            info_line['badfit'] = vrot.badfit_status
            data_lines[str(w)] = info_line
            del info_line
            # NOTE(review): if badfit_status is a plain Python bool, `~` is
            # bitwise NOT and this condition is always truthy; `not` was
            # likely intended — confirm the type set by Vsini.
            if ~vrot.badfit_status:
                #vsini_lines[l] = vrot.best_v
                #err_vsini_lines[l] = np.sqrt(vrot.S)
                weight_lines[l] = 1./vrot.S
                vsini_dist, minSnoise = noise_estimation(vrot, snr, N=1000)
                # Median and the larger of the 16th/84th percentile offsets.
                p = np.percentile(vsini_dist, [16, 50, 84])
                vsini_lines[l] = p[1]
                err_vsini_lines[l] = max(p[1]-p[0], p[2]-p[1])
                data_lines[str(w)]['vsini_dist'] = vsini_dist
                del vsini_dist
            del vrot
            del kwargs, spec_window, kwargs2
            os.system('rm -f ./Spectra/%s_%d.dat' % (alias, l))
        new_data = data_o
        del data_o
    # Persist the per-line results and produce diagnostic plots.
    f = open('./plots_broadening/%s_data_lines.pkl' % starname, 'wb')
    pickle.dump(data_lines, f)
    f.close()
    plot_paper(starname, data_lines)
    plot_dist(starname, data_lines)
    del new_data, data_lines, lines1, lines2, lines3, lines4, lines5, lines, f
    # Clean up the temporary MOOG input/output files for this alias.
    os.system('rm -f ./atm_models/%s_v.atm' % alias)
    os.system('rm -f ./output/%s_l.out' % alias)
    os.system('rm -f ./output/%s_li.out' % alias)
    os.system('rm -f ./output/%s_s.out' % alias)
    os.system('rm -f ./output/%s_sn.out' % alias)
    os.system('rm -f ./MOOG_linelist/lines.%s_v.txt' % alias)
    os.system('rm -f ./MOOGFEB2017/abfind_%s_v_2.par' % alias)
    os.system('rm -f ./MOOGFEB2017/abfind_%s_v.par' % alias)
    os.system('rm -f ./MOOGFEB2017/%s_synth.par' % alias)
    os.system('rm -f ./output/%s.dat' % alias)
    os.system('rm -f ./output/%s_o.dat' % alias)
    # Correct for the solar values, so that vsini_sun = 1.9 km/s
    #vsini_lines = vsini_lines - 1.28
    return vsini_lines, err_vsini_lines, weight_lines
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
def calc_ab(starname, err_met, err_ni, alias='test'):
    """
    Run MOOG (abfind driver) and collect per-line abundances.

    Builds the MOOG line list for `starname`, rewrites the abfind parameter
    file so its input/output paths use `alias`, runs MOOGSILENT, and parses
    the summary output into per-line abundance offsets.

    Parameters
    ----------
    starname : star identifier (used for the line list / EW files).
    err_met : uncertainty assigned to Fe I lines (Z == 26).
    err_ni : uncertainty assigned to the remaining (Ni) lines.
    alias : short name used for the temporary MOOG files.

    Returns
    -------
    (ab, dev) : dicts keyed by the line wavelength string, with the
    abundance relative to the adopted solar value (7.50 for Fe, 6.22 for
    Ni) and the associated uncertainty.
    """
    moog_linelist(starname, alias)
    cmd = 'cp ./MOOGFEB2017/abfind.par ./MOOGFEB2017/abfind_%s_v.par' % (alias)
    os.system(cmd)
    # Rewrite the I/O paths in the template parameter file for this alias.
    with open('./MOOGFEB2017/abfind_%s_v.par' % (alias), 'r') as par:
        with open('./MOOGFEB2017/abfind_%s_v_2.par' % (alias), 'w') as par_out:
            for linea in par:
                columnas = linea.strip()
                m = re.search(r'standard_out\s*(\S*).*', columnas)
                if m:
                    linea = "standard_out './output/%s.dat'\n" % (alias)
                m = re.search(r'summary_out\s*(\S*).*', columnas)
                if m:
                    linea = "summary_out './output/%s_o.dat'\n" % (alias)
                m = re.search(r'model_in\s*(\S*).*', columnas)
                if m:
                    linea = "model_in './atm_models/%s_v.atm'\n" % (alias)
                m = re.search(r'lines_in\s*(\S*).*', columnas)
                if m:
                    linea = "lines_in './MOOG_linelist/lines.%s_v.txt'\n" % (alias)
                par_out.writelines(linea)
    cmd = 'cp ./MOOGFEB2017/abfind_%s_v_2.par ./MOOGFEB2017/abfind_%s_v.par' % (alias, alias)
    os.system(cmd)
    # Drive MOOGSILENT non-interactively via a shell here-document.
    os.system('MOOGSILENT > temp.log 2>&1 << EOF\nMOOGFEB2017/abfind_%s_v.par\n\nEOF' % alias)
    ab = {}
    dev = {}
    # Parse the summary output: data rows are the lines containing digits
    # but no lowercase letters (header/comment rows contain text).
    with open('./output/%s_o.dat' % alias) as output:
        for linea in output:
            linea = linea.strip()
            m = re.search(r'[a-z]', linea)
            if m is None:
                m = re.search(r'[\d]', linea)
                if m:
                    linea = linea.split()
                    # Column 1 holds the species ID (26 = Fe I, else Ni).
                    ID = int(float(linea[1]))
                    if ID == 26:
                        ab_id = 7.50# + met
                        dev_id = err_met
                    else:
                        ab_id = 6.22# + ab_ni
                        dev_id = err_ni
                    # Column 6 is the derived abundance; key is the wavelength.
                    ab[linea[0]] = float(linea[6]) - ab_id# - met
                    dev[linea[0]] = dev_id
            del m
    del cmd, par, par_out, output
    return ab, dev
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
def moog_linelist(starname, alias='test'):
    """
    Write the MOOG-format line list used for the vsini analysis.

    Combines the master line list ('./Spectra/linelist_vsini.dat') with the
    measured equivalent widths ('./EW/<starname>_vsini.txt') and writes the
    result to 'MOOG_linelist/lines.<alias>_v.txt' in MOOG's fixed-width
    abfind format.

    Parameters
    ----------
    starname : star identifier used to locate the EW file.
    alias : short name used for the output line-list file.
    """
    linelist = np.genfromtxt('./Spectra/linelist_vsini.dat', dtype=None, skip_header=2,\
                             names=('line', 'excit', 'loggf', 'num', 'ion'))
    line = linelist['line']
    excit = linelist['excit']
    loggf = linelist['loggf']
    num = linelist['num']
    ion = linelist['ion']
    file_ew = np.genfromtxt('./EW/%s_vsini.txt' % starname, dtype=None,\
                            names=('line', 'ew', 'ew_e', 'ew_err1', 'ew_err2'))
    line_ew_b = file_ew['line']
    ew_b = file_ew['ew']
    # Use the larger of the two EW error estimates.
    ew_err_b = np.maximum(file_ew['ew_err1'], file_ew['ew_err2'])
    #Take only the lines that 10. <= EW <=150 and the error in the EW is lower than the EW
    ilines = np.where((ew_b >= 10.) & (ew_b <= 150.) & (old_div(ew_err_b, ew_b) <= 1.0))[0]
    line_ew = line_ew_b[ilines]
    ew = ew_b[ilines]
    ew_err = ew_err_b[ilines]
    with open('MOOG_linelist/lines.%s_v.txt' % alias, 'w') as output:
        output.write(' %s_vsini.txt\n' % starname)
        for i, l in enumerate(line_ew):
            # Only write lines that appear exactly once in the master list.
            index = np.where(line == l)[0]
            if len(index) == 1:
                index = int(index[0])
                # Fixed-width columns: wavelength, ion, EP, loggf, EW.
                output.write('%s%7.2f%s%4.1f%s%5.2f%s%6.3f%s%7.4f\n' %\
                             (' '*2, line[index], ' '*4, ion[index], ' '*7, excit[index], \
                              ' '*5, loggf[index], ' '*23, ew[i]))
    del output
    del linelist, line, excit, loggf, num, ion, file_ew, line_ew_b, ew_b, ew_err_b,\
        ilines, line_ew, ew, ew_err
#******************************************************************************
#******************************************************************************
#******************************************************************************
#******************************************************************************
def compute_snr(x, data, w):
    """
    Computes the S/N for spectra using a set of
    ranges where there shouldn't be any lines,
    and only continuum.

    The region within +-0.3 of the line centre `w` is excluded (it contains
    the line itself); each remaining side contributes mean(flux)/std(flux),
    and the returned value is the mean of the available sides.

    Parameters
    ----------
    x : array of wavelengths.
    data : array of fluxes, same length as x.
    w : central wavelength of the line to exclude.

    Returns
    -------
    float : the estimated S/N, or NaN when no continuum points exist on
    either side.
    """
    # old_div was unnecessary here: both operands are floats, so true
    # division is identical — Py2 compatibility shim removed.
    left = np.where(x < (w - 0.3))[0]
    right = np.where(x > (w + 0.3))[0]
    sn = []
    if left.size > 0:
        sn.append(np.mean(data[left]) / np.std(data[left]))
    if right.size > 0:
        sn.append(np.mean(data[right]) / np.std(data[right]))
    if not sn:
        # Preserve the NaN result of mean-of-empty without triggering
        # numpy's RuntimeWarning.
        return float('nan')
    return np.mean(sn)
#******************************************************************************
def plot_paper(starname, data_lines):
    """
    Produce the publication-style summary figure for the vsini fits.

    One row per line: the abundance grid S(a), the vsini grid S(v), and the
    data-vs-model profile.  Saved to
    './plots_broadening/<starname>_vsini_paper.pdf'.  Plotting failures are
    non-fatal: they are logged and swallowed so the pipeline can continue.
    """
    ticks_font = matplotlib.font_manager.FontProperties(style='normal', size=9,
                                                        weight='medium', stretch='normal')
    def _plot_grid(ax, value, w, grid, S, yfit, type_g='vsini'):
        # Plot a merit-statistic grid (S vs. vsini or abundance) with its
        # spline fit and the adopted best value.
        ax.plot(grid, S, '.-', color='dimgrey')
        ax.plot(grid, yfit, color='tomato')
        ax.axvline(value, color='tomato')
        ax.locator_params(nbins=4)
        ax.tick_params(axis='both', which='major', labelsize=10)
        ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
        if type_g == 'vsini':
            ax.set_xlabel(r'$v\sin i$ (km/s)', fontsize=9)
        else:
            ax.set_xlabel('abundance', fontsize=9)
        ax.set_ylabel('$S$', fontsize=9)
        _ = [i.set_linewidth(0.5) for i in ax.spines.values()]
        for label in ax.get_yticklabels():
            label.set_fontproperties(ticks_font)
        for label in ax.get_xticklabels():
            label.set_fontproperties(ticks_font)
        # Annotate the wavelength in the upper-right corner.
        sy, ey = ax.get_ylim()
        sx, ex = ax.get_xlim()
        ax.text(ex - 5.*(ex-sx)/11., ey - (ey-sy)/10., \
                r'$\lambda$ = %s $\AA$' % (w),\
                style='italic', fontsize=8, backgroundcolor='white')
        del sx, sy, ex, ey
        return ax
    def _add_plot(ax, data, model, w, vsini, vmac, badfit):
        # Plot observed data points against the best-fit synthetic profile;
        # a red background marks lines flagged as bad fits.
        x_limits = [data[:, 0][0], data[:, 0][-1]]
        i_m1 = np.where(model[:, 0] < data[:, 0][0])[0]
        i_m2 = np.where(model[:, 0] > data[:, 0][-1])[0]
        model[:, 0][i_m1] = 1.0
        model[:, 0][i_m2] = 1.0
        del i_m1, i_m2
        if badfit:
            r = ax.patch
            r.set_facecolor('red')
            r.set(alpha=0.2)
            del r
        ax.plot(data[:, 0], data[:, 1], marker='.', ls='None', color='dimgrey')
        ax.plot(model[:, 0], model[:, 1], color='tomato')
        ax.tick_params(axis='both', which='major', labelsize=8)
        _ = [i.set_linewidth(0.5) for i in ax.spines.values()]
        for label in ax.get_yticklabels():
            label.set_fontproperties(ticks_font)
        for label in ax.get_xticklabels():
            label.set_fontproperties(ticks_font)
        ax.set_xlabel(r'$\lambda$ ($\AA$)', fontsize=9)
        ax.set_ylabel(r'$F_\lambda$ $d\lambda$', fontsize=9)
        ax.set_xlim(x_limits)
        ax.locator_params(axis='x', nbins=6)
        ax.locator_params(axis='y', nbins=5)
        sy, ey = ax.get_ylim()
        sx, ex = ax.get_xlim()
        ax.text(sx + (ex-sx)/13., sy + (ey-sy)/10., \
                r'$\lambda$ = %s $\AA$'
                '\n'
                r'$v \sin{i}$ = %.2f'
                '\n'
                r'$v_{macro}$ = %.2f' % (w, vsini, vmac),\
                style='italic', fontsize=8)
        del x_limits, sx, sy, ex, ey
        return ax
    try:
        lines = sorted(data_lines.keys())
        n = len(lines)
        cols, rows = 3, n
        fig, ax = plt.subplots(rows, cols, figsize=(cols*4, rows*2.5))
        # With a single row, plt.subplots returns a 1-D axes array.
        if rows == 1:
            d = data_lines[lines[0]]
            ax[0] = _plot_grid(ax[0], d['abundance'], lines[0], d['a_grid'],
                               d['S_a'], d['yfit_a'], type_g='abundance')
            ax[1] = _plot_grid(ax[1], d['vsini'], lines[0], d['v_grid'],
                               d['S_v'], d['yfit_v'], type_g='vsini')
            ax[2] = _add_plot(ax[2], d['data'], d['model'], lines[0], d['vsini'],
                              d['vmac'], d['badfit'])
            del d
        else:
            for r in range(rows):
                d = data_lines[lines[r]]
                ax[r][0] = _plot_grid(ax[r][0], d['abundance'], lines[r], d['a_grid'],
                                      d['S_a'], d['yfit_a'], type_g='abundance')
                ax[r][1] = _plot_grid(ax[r][1], d['vsini'], lines[r], d['v_grid'],
                                      d['S_v'], d['yfit_v'], type_g='vsini')
                ax[r][2] = _add_plot(ax[r][2], d['data'], d['model'], lines[r],
                                     d['vsini'], d['vmac'], d['badfit'])
                del d
        fig.subplots_adjust(hspace=0.3, wspace=0.25, bottom=0.05, left=0.07, right=0.98, top=0.98)
        fig.savefig('./plots_broadening/%s_vsini_paper.pdf' % starname)
        plt.close('all')
        del lines, n, cols, rows, fig, ax
    except Exception:
        # Was a bare `except: pass`, which also swallowed KeyboardInterrupt/
        # SystemExit and hid errors entirely.  Keep the plot best-effort but
        # record what went wrong.
        logging.exception('plot_paper failed for %s', starname)
def plot_dist(starname, data_lines):
    """
    Plot the Monte-Carlo vsini distributions obtained for each line.

    For every line whose entry contains a 'vsini_dist' sample array, a
    histogram is drawn with the median marked and the 16-84 percentile
    half-width quoted in the legend.  The figure is saved to
    './plots_broadening/<starname>_vsini_dist.pdf'.  Failures are printed
    and otherwise ignored so plotting never aborts the pipeline.
    """
    def _draw(axis, entry):
        # Render one line's distribution (no-op when no samples exist).
        if 'vsini_dist' in entry:
            dist = entry['vsini_dist']
            axis.hist(dist, bins=40)
            p16, p50, p84 = np.percentile(dist, [16, 50, 84])
            spread = max(p50 - p16, p84 - p50)
            axis.axvline(p50, label=r'%.2f $\pm$ %.2f km/s' % (p50, spread), color='orange')
            axis.legend()
            axis.set_xlabel('vsini (km/s)')
    try:
        keys = sorted(data_lines.keys())
        nlines = len(keys)
        fig, axes = plt.subplots(1, nlines, figsize=(nlines*3, 3))
        if nlines == 1:
            # A single subplot is returned as a bare Axes, not an array.
            _draw(axes, data_lines[keys[0]])
        else:
            for axis, key in zip(axes, keys):
                _draw(axis, data_lines[key])
        fig.subplots_adjust(hspace=0.3, wspace=0.25, bottom=0.2, left=0.03, right=0.98, top=0.95)
        fig.savefig('./plots_broadening/%s_vsini_dist.pdf' % starname)
        plt.close('all')
    except Exception as e:
        print(e)
|
msotov/SPECIES
|
CalcBroadening.py
|
Python
|
mit
| 65,014
|
[
"Gaussian"
] |
ce7303ac708fc97440b32e05193ef435ee30bd24576de545a263b78b1aa99bbb
|
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE13a',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10025: {'type': 'attribModifier',
'name': 'strength',
'comment': '',
'parentEntId': 10002,
'attribName': 'strength',
'recursive': 1,
'typeName': 'goon',
'value': '10'},
10001: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10005: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10006: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10014: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10013,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10016: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10015,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10018: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10017,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10021: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10020,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10024: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10023,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1.5,
'attackRadius': 20,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 24,
'velocity': 7},
10035: {'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10037,
'pos': Point3(-56.3795814514, 0.0, 0.0),
'hpr': Vec3(106.821411133, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 7,
'rewardPerGrabMax': 8},
10036: {'type': 'healBarrel',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10037,
'pos': Point3(15.3852472305, 21.0357513428, 0.0),
'hpr': Vec3(52.4314079285, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 7,
'rewardPerGrabMax': 8},
10029: {'type': 'model',
'name': 'rightPillar',
'comment': '',
'parentEntId': 10032,
'pos': Point3(0.0, -22.3441867828, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10030: {'type': 'model',
'name': 'leftPillar',
'comment': '',
'parentEntId': 10032,
'pos': Point3(0.0, 21.9451503754, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10033: {'type': 'model',
'name': 'backPillar',
'comment': '',
'parentEntId': 10032,
'pos': Point3(41.4432792664, 0.0, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10034: {'type': 'model',
'name': 'frontPillar',
'comment': '',
'parentEntId': 10032,
'pos': Point3(-41.0848464966, 0.0, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10039: {'type': 'model',
'name': 'rightPillar',
'comment': '',
'parentEntId': 10038,
'pos': Point3(0.0, -66.8615875244, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10040: {'type': 'model',
'name': 'leftPillar',
'comment': '',
'parentEntId': 10038,
'pos': Point3(0.0, 67.0966033936, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10042: {'type': 'model',
'name': 'frontRightPillar',
'comment': '',
'parentEntId': 10043,
'pos': Point3(0.0, -22.5711078644, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10044: {'type': 'model',
'name': 'frontLeftPillar',
'comment': '',
'parentEntId': 10043,
'pos': Point3(0.0, 22.1686630249, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10046: {'type': 'model',
'name': 'frontRightPillar',
'comment': '',
'parentEntId': 10045,
'pos': Point3(0.0, -22.5711078644, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10047: {'type': 'model',
'name': 'frontLeftPillar',
'comment': '',
'parentEntId': 10045,
'pos': Point3(0.0, 22.1686630249, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_A1'},
10049: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10048,
'pos': Point3(0.949898421764, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.29060935974, 1.29060935974, 1.29060935974),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_F1.bam'},
10050: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10048,
'pos': Point3(-13.1818971634, -7.17138242722, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_E.bam'},
10051: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10048,
'pos': Point3(0.968334257603, -13.3785037994, 0.0),
'hpr': Vec3(180.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'},
10053: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10052,
'pos': Point3(0.606362164021, -12.1353359222, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_G1.bam'},
10054: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10052,
'pos': Point3(7.85215950012, 20.0426883698, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.16659212112, 1.16659212112, 1.16659212112),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_E.bam'},
10055: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10052,
'pos': Point3(13.5166940689, -0.819138884544, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.51914477348, 1.51914477348, 1.51914477348),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_D.bam'},
10056: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10052,
'pos': Point3(10.8745326996, 4.61703014374, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'},
10057: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10052,
'pos': Point3(31.8470001221, -14.5645837784, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.1728117466, 1.1728117466, 1.1728117466),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_A.bam'},
10058: {'type': 'model',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10052,
'pos': Point3(31.9369258881, 14.3037395477, 0.0),
'hpr': Point3(90.0, 0.0, 0.0),
'scale': Vec3(1.1728117466, 1.1728117466, 1.1728117466),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_A.bam'},
10002: {'type': 'nodepath',
'name': 'goons',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Point3(270.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10007: {'type': 'nodepath',
'name': 'rightElbow',
'comment': '',
'parentEntId': 10027,
'pos': Point3(43.3083152771, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10008: {'type': 'nodepath',
'name': 'leftElbow',
'comment': '',
'parentEntId': 10026,
'pos': Point3(-38.578956604, -2.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10009: {'type': 'nodepath',
'name': 'nearRight',
'comment': '',
'parentEntId': 10027,
'pos': Point3(25.6041526794, -41.3753585815, 0.0),
'hpr': Vec3(320.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10010: {'type': 'nodepath',
'name': 'nearLeft',
'comment': '',
'parentEntId': 10026,
'pos': Point3(-25.6000003815, -41.3800010681, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10011: {'type': 'nodepath',
'name': 'farRight',
'comment': '',
'parentEntId': 10027,
'pos': Point3(25.6000003815, 41.3800010681, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10012: {'type': 'nodepath',
'name': 'farLeft',
'comment': '',
'parentEntId': 10026,
'pos': Point3(-25.6000003815, 41.3800010681, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10019: {'type': 'nodepath',
'name': 'entrance',
'comment': '',
'parentEntId': 10002,
'pos': Point3(0.0, -82.5020980835, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10022: {'type': 'nodepath',
'name': 'exit',
'comment': '',
'parentEntId': 10002,
'pos': Point3(0.0, 88.4478759766, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10026: {'type': 'nodepath',
'name': 'left',
'comment': '',
'parentEntId': 10002,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10027: {'type': 'nodepath',
'name': 'right',
'comment': '',
'parentEntId': 10002,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10028: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, -1.80477809906, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10031: {'type': 'nodepath',
'name': 'pillars',
'comment': '',
'parentEntId': 10028,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10032: {'type': 'nodepath',
'name': 'centerPillars',
'comment': '',
'parentEntId': 10031,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10037: {'type': 'nodepath',
'name': 'barrels',
'comment': '',
'parentEntId': 0,
'pos': Point3(102.779998779, -1.24000000954, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10038: {'type': 'nodepath',
'name': 'outerPillars',
'comment': '',
'parentEntId': 10031,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10043: {'type': 'nodepath',
'name': 'frontPillars',
'comment': '',
'parentEntId': 10031,
'pos': Point3(-89.9665527344, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10045: {'type': 'nodepath',
'name': 'backPillars',
'comment': '',
'parentEntId': 10031,
'pos': Point3(89.9700012207, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10048: {'type': 'nodepath',
'name': 'frontProps',
'comment': '',
'parentEntId': 10028,
'pos': Point3(-100.412567139, -10.8835134506, 0.0),
'hpr': Vec3(270.0, 0.0, 0.0),
'scale': Vec3(1.66847121716, 1.66847121716, 1.66847121716)},
10052: {'type': 'nodepath',
'name': 'backProps',
'comment': '',
'parentEntId': 10028,
'pos': Point3(100.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10000: {'type': 'path',
'name': 'triangle',
'comment': '',
'parentEntId': 10008,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 1,
'pathScale': 1.5},
10003: {'type': 'path',
'name': 'square',
'comment': '',
'parentEntId': 10007,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10004: {'type': 'path',
'name': 'bowtie',
'comment': '',
'parentEntId': 10009,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 2,
'pathScale': 1.0},
10013: {'type': 'path',
'name': 'square',
'comment': '',
'parentEntId': 10010,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10015: {'type': 'path',
'name': 'square',
'comment': '',
'parentEntId': 10011,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10017: {'type': 'path',
'name': 'square',
'comment': '',
'parentEntId': 10012,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10020: {'type': 'path',
'name': 'pace',
'comment': '',
'parentEntId': 10019,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 3,
'pathScale': 1.0},
10023: {'type': 'path',
'name': 'pace',
'comment': '',
'parentEntId': 10022,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1,
'pathIndex': 3,
'pathScale': 1.0}}
# Placeholder scenario: this room defines no scenario-specific overrides.
Scenario0 = {}
# Level specification — appears to be consumed by the coghq level-loading
# machinery (see SpecImports): all entity definitions plus the list of
# alternative scenarios (only one, empty, for this room).
levelSpec = {'globalEntities': GlobalEntities,
             'scenarios': [Scenario0]}
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/coghq/SellbotMegaFactoryPipeRoom_Action00.py
|
Python
|
apache-2.0
| 19,355
|
[
"Bowtie"
] |
40ddb9001b37b5fe621671a65d987b732302df0c884d972871860576c058db9a
|
##
# Copyright 2013-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_PSI(CMakeMake):
    """
    Support for building and installing PSI.
    """

    def __init__(self, *args, **kwargs):
        """Initialize class variables custom to PSI."""
        super(EB_PSI, self).__init__(*args, **kwargs)

        # source/object directory locations, set in configure_step and reused
        # in install_step so PSI plugins can be compiled after installation
        self.psi_srcdir = None
        self.install_psi_objdir = None
        self.install_psi_srcdir = None

    @staticmethod
    def extra_options():
        """Extra easyconfig parameters specific to PSI."""
        extra_vars = {
            # always include running PSI unit tests (takes about 2h or less)
            'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
        }
        return CMakeMake.extra_options(extra_vars)

    def configure_step(self):
        """
        Configure build outside of source directory.

        Raises EasyBuildError if the build directory can not be prepared, or
        if required dependency modules (Python, Boost) are not loaded.
        """
        try:
            objdir = os.path.join(self.builddir, 'obj')
            os.makedirs(objdir)
            os.chdir(objdir)
        # fix: use Python 3 compatible 'except ... as err' syntax
        # (valid on Python 2.6+ as well)
        except OSError as err:
            raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)

        env.setvar('F77FLAGS', os.getenv('F90FLAGS'))

        # In order to create new plugins with PSI, it needs to know the location of the source
        # and the obj dir after install. These env vars give that information to the configure script.
        self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
        self.install_psi_objdir = os.path.join(self.installdir, 'obj')
        self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
        env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
        env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)

        # explicitly specify Python binary to use
        pythonroot = get_software_root('Python')
        if not pythonroot:
            raise EasyBuildError("Python module not loaded.")

        # use the Boost installation provided via EasyBuild
        boostroot = get_software_root('Boost')
        if not boostroot:
            raise EasyBuildError("Boost module not loaded.")

        # pre 4.0b5, they were using autotools, on newer it's CMake
        if LooseVersion(self.version) <= LooseVersion("4.0b5"):
            env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
            env.setvar('USE_SYSTEM_BOOST', 'TRUE')

            if self.toolchain.options.get('usempi', None):
                # PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
                # we should always specify the sequential Fortran compiler,
                # to avoid problems with -lmpi vs -lmpi_mt during linking
                fcompvar = 'F77_SEQ'
            else:
                fcompvar = 'F77'

            # update configure options
            # using multi-threaded BLAS/LAPACK is important for performance,
            # cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
            opt_vars = [
                ('cc', 'CC'),
                ('cxx', 'CXX'),
                ('fc', fcompvar),
                ('libdirs', 'LDFLAGS'),
                ('blas', 'LIBBLAS_MT'),
                ('lapack', 'LIBLAPACK_MT'),
            ]
            for (opt, var) in opt_vars:
                self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))

            # -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
            # both define SEEK_SET, this makes the one for MPI be ignored
            self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))

            # specify location of Boost
            self.cfg.update('configopts', "--with-boost=%s" % boostroot)

            # enable support for plugins
            self.cfg.update('configopts', "--with-plugins")

            ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
        else:
            self.cfg['configopts'] += "-DPYTHON_INTERPRETER=%s " % os.path.join(pythonroot, 'bin', 'python')
            self.cfg['configopts'] += "-DCMAKE_BUILD_TYPE=Release "

            if self.toolchain.options.get('usempi', None):
                self.cfg['configopts'] += "-DENABLE_MPI=ON "

            # MKL CSR support is only enabled when Intel MPI is present
            if get_software_root('impi'):
                self.cfg['configopts'] += "-DENABLE_CSR=ON -DBLAS_TYPE=MKL "

            CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])

    def install_step(self):
        """Custom install procedure for PSI."""
        super(EB_PSI, self).install_step()

        # the obj and unpacked sources must remain available for working with plugins
        try:
            for subdir in ['obj', self.psi_srcdir]:
                # copy symlinks as symlinks to work around broken symlinks
                shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir),
                                symlinks=True)
        except OSError as err:
            raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err)

    def test_step(self):
        """
        Run the testsuite of PSI4, using a temporary scratch directory.
        """
        testdir = tempfile.mkdtemp()
        env.setvar('PSI_SCRATCH', testdir)
        super(EB_PSI, self).test_step()

        try:
            shutil.rmtree(testdir)
        except OSError as err:
            raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err)

    def sanity_check_step(self):
        """Custom sanity check for PSI."""
        custom_paths = {
            # binary is named after the major version, e.g. bin/psi4
            'files': ['bin/psi%s' % self.version.split('.')[0]],
            'dirs': ['include', ('share/psi', 'share/psi4')],
        }
        super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_extra(self):
        """Custom variables for PSI module."""
        txt = super(EB_PSI, self).make_module_extra()

        share_dir = os.path.join(self.installdir, 'share')

        if os.path.exists(share_dir):
            # locate the single psi* data directory (psi or psi4, depending on version)
            psi4datadir = glob.glob(os.path.join(share_dir, 'psi*'))
            if len(psi4datadir) == 1:
                txt += self.module_generator.set_environment('PSI4DATADIR', psi4datadir[0])
            else:
                raise EasyBuildError("Failed to find exactly one PSI4 data dir: %s", psi4datadir)

        return txt
|
wpoely86/easybuild-easyblocks
|
easybuild/easyblocks/p/psi.py
|
Python
|
gpl-2.0
| 7,941
|
[
"Psi4"
] |
ce30830a4e64914f4df7a07d0bfe68ac78d15fdf378410db44179593c6e86f7e
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import cycle
from collections import defaultdict
import numpy as np
from matplotlib.artist import Artist
from matplotlib.patches import FancyArrowPatch, ArrowStyle, FancyArrow, Circle
from matplotlib.patches import Arc, RegularPolygon, PathPatch
from matplotlib.cm import get_cmap
from matplotlib.collections import PatchCollection, PathCollection
from matplotlib.colors import ListedColormap, Normalize, ColorConverter
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid1 import make_axes_locatable
import nngt
from nngt.lib import POS, nonstring_container, is_integer
from .custom_plt import palette_continuous, palette_discrete, format_exponent
from .chord_diag import chord_diagram as _chord_diag
from .hive_helpers import *
'''
Network plotting
================
Implemented
-----------
Simple representation for spatial graphs, random distribution if non-spatial.
Support for edge-size (according to betweenness or synaptic weight).
Objectives
----------
Implement the spring-block minimization.
If edges have varying size, plot only those that are visible (size > min)
'''
__all__ = ["chord_diagram", "draw_network", "hive_plot", "library_draw"]
# ------- #
# Drawing #
# ------- #
def draw_network(network, nsize="total-degree", ncolor="group", nshape="o",
                 nborder_color="k", nborder_width=0.5, esize=1., ecolor="k",
                 ealpha=0.5, max_nsize=None, max_esize=2., curved_edges=False,
                 threshold=0.5, decimate_connections=None, spatial=True,
                 restrict_sources=None, restrict_targets=None,
                 restrict_nodes=None, restrict_edges=None,
                 show_environment=True, fast=False, size=(600, 600),
                 xlims=None, ylims=None, dpi=75, axis=None, colorbar=False,
                 cb_label=None, layout=None, show=False, **kwargs):
    '''
    Draw a given graph/network.

    Parameters
    ----------
    network : :class:`~nngt.Graph` or subclass
        The graph/network to plot.
    nsize : float, array of float or string, optional (default: "total-degree")
        Size of the nodes as a percentage of the canvas length. Otherwise, it
        can be a string that correlates the size to a node attribute among
        "in/out/total-degree", "in/out/total-strength", or "betweenness".
    ncolor : float, array of floats or string, optional (default: 0.5)
        Color of the nodes; if a float in [0, 1], position of the color in the
        current palette, otherwise a string that correlates the color to a node
        attribute among "in/out/total-degree", "betweenness" or "group".
    nshape : char, array of chars, or groups, optional (default: "o")
        Shape of the nodes (see `Matplotlib markers <http://matplotlib.org/api/
        markers_api.html?highlight=marker#module-matplotlib.markers>`_).
        When using groups, they must be pairwise disjoint; markers will be
        selected iteratively from the matplotlib default markers.
    nborder_color : char, float or array, optional (default: "k")
        Color of the node's border using predefined `Matplotlib colors
        <http://matplotlib.org/api/colors_api.html?highlight=color
        #module-matplotlib.colors>`_).
        or floats in [0, 1] defining the position in the palette.
    nborder_width : float or array of floats, optional (default: 0.5)
        Width of the border in percent of canvas size.
    esize : float, str, or array of floats, optional (default: 0.5)
        Width of the edges in percent of canvas length. Available string values
        are "betweenness" and "weight".
    ecolor : str, char, float or array, optional (default: "k")
        Edge color. If ecolor="groups", edges color will depend on the source
        and target groups, i.e. only edges from and toward same groups will
        have the same color.
    max_nsize : float, optional (default: depends on `simple_nodes`)
        If a custom property is entered as `nsize`, this normalizes the node
        size between 0. and `max_nsize`.
    max_esize : float, optional (default: 5.)
        If a custom property is entered as `esize`, this normalizes the edge
        width between 0. and `max_esize`.
    threshold : float, optional (default: 0.5)
        Size under which edges are not plotted.
    decimate_connections : int, optional (default: keep all connections)
        Plot only one connection every `decimate_connections`.
        Use -1 to hide all edges.
    spatial : bool, optional (default: True)
        If True, use the neurons' positions to draw them.
    restrict_sources : str, group, or list, optional (default: all)
        Only draw edges starting from a restricted set of source nodes.
    restrict_targets : str, group, or list, optional (default: all)
        Only draw edges ending on a restricted set of target nodes.
    restrict_nodes : str, group, or list, optional (default: plot all nodes)
        Only draw a subset of nodes.
    restrict_edges : list of edges, optional (default: all)
        Only draw a subset of edges.
    show_environment : bool, optional (default: True)
        Plot the environment if the graph is spatial.
    fast : bool, optional (default: False)
        Use a faster algorithm to plot the edges. Zooming on the drawing made
        using this method leaves the size of the nodes and edges unchanged, it
        is therefore not recommended when size consistency matters, e.g. for
        some spatial representations.
    size : tuple of ints, optional (default: (600,600))
        (width, height) tuple for the canvas size (in px).
    dpi : int, optional (default: 75)
        Resolution (dot per inch).
    axis : matplotlib axis, optional (default: create new axis)
        Axis on which the network will be plotted.
    colorbar : bool, optional (default: False)
        Whether to display a colorbar for the node colors or not.
    cb_label : str, optional (default: None)
        A label for the colorbar.
    layout : str, optional (default: random or spatial positions)
        Name of a standard layout to structure the network. Available layouts
        are: "circular" or "random". If no layout is provided and the network
        is spatial, then node positions will be used by default.
    show : bool, optional (default: True)
        Display the plot immediately.
    **kwargs : dict
        Optional keyword arguments including `node_cmap` to set the
        nodes colormap (default is "magma" for continuous variables and
        "Set1" for groups) and "title" to add a title to the plot.
    '''
    import matplotlib.pyplot as plt

    # figure and axes
    size_inches = (size[0]/float(dpi), size[1]/float(dpi))

    if axis is None:
        fig = plt.figure(facecolor='white', figsize=size_inches,
                         dpi=dpi)
        axis = fig.add_subplot(111, frameon=0, aspect=1)

    axis.set_axis_off()

    pos = None

    # restrict sources and targets
    restrict_sources = _convert_to_nodes(restrict_sources,
                                         "restrict_sources", network)

    restrict_targets = _convert_to_nodes(restrict_targets,
                                         "restrict_targets", network)

    restrict_nodes = _convert_to_nodes(restrict_nodes,
                                       "restrict_nodes", network)

    # `restrict_nodes` implies restricting both sources and targets
    if restrict_nodes is not None and restrict_sources is not None:
        restrict_sources = \
            set(restrict_nodes).intersection(restrict_sources)
    elif restrict_nodes is not None:
        restrict_sources = set(restrict_nodes)

    if restrict_nodes is not None and restrict_targets is not None:
        restrict_targets = \
            set(restrict_nodes).intersection(restrict_targets)
    elif restrict_nodes is not None:
        restrict_targets = set(restrict_nodes)

    # get nodes and edges
    n = network.node_nb() if restrict_nodes is None \
        else len(restrict_nodes)

    adj_mat = network.adjacency_matrix(weights=None)

    # zero-out rows (resp. columns) for excluded sources (resp. targets)
    if restrict_sources is not None:
        remove = np.array(
            [1 if node not in restrict_sources else 0
             for node in range(network.node_nb())],
            dtype=bool)
        adj_mat[remove] = 0

    if restrict_targets is not None:
        remove = np.array(
            [1 if node not in restrict_targets else 0
             for node in range(network.node_nb())],
            dtype=bool)
        adj_mat[:, remove] = 0

    edges = (np.array(adj_mat.nonzero()).T if restrict_edges is None else
             restrict_edges)

    e = len(edges)

    # compute properties
    decimate_connections = 1 if decimate_connections is None\
                           else decimate_connections

    # get node and edge shape/size properties
    simple_nodes = kwargs.get("simple_nodes", False)

    if fast:
        simple_nodes = True

    max_nsize = (20 if simple_nodes else 5) if max_nsize is None else max_nsize

    markers, nsize, esize = _node_edge_shape_size(
        network, nshape, nsize, max_nsize, esize, max_esize, restrict_nodes,
        edges, size, threshold, simple_nodes=simple_nodes)

    # node color information
    default_ncmap = (palette_discrete() if not nonstring_container(ncolor) and
                     ncolor == "group" else palette_continuous())

    nalpha = kwargs.get("nalpha", 1)

    ncmap = get_cmap(kwargs.get("node_cmap", default_ncmap))

    node_color, nticks, ntickslabels, nlabel = \
        _node_color(network, restrict_nodes, ncolor)

    if nonstring_container(ncolor):
        assert len(ncolor) == n, "For color arrays, one " +\
            "color per node is required."
        ncolor = "custom"

    c = node_color

    if not nonstring_container(nborder_color):
        nborder_color = np.repeat(nborder_color, n)

    # check edge color
    group_based = False

    default_ecmap = (palette_discrete() if not nonstring_container(ncolor) and
                     ecolor == "group" else palette_continuous())

    if isinstance(ecolor, float):
        ecolor = np.repeat(ecolor, e)
    elif ecolor == "groups" or ecolor == "group":
        if not network.is_network():
            raise TypeError(
                "The graph must be a Network to use `ecolor='groups'`.")

        group_based = True
        ecolor = {}

        # one color per (source group, target group) pair, mixed from the
        # node colors of the two groups
        for i, src in enumerate(network.population):
            if network.population[src].ids:
                idx1 = network.population[src].ids[0]
                for j, tgt in enumerate(network.population):
                    if network.population[tgt].ids:
                        idx2 = network.population[tgt].ids[0]
                        if src == tgt:
                            ecolor[(src, tgt)] = node_color[idx1]
                        else:
                            ecolor[(src, tgt)] = \
                                np.abs(0.8*node_color[idx1]
                                       - 0.2*node_color[idx2])

    # compute node positions
    pos = np.zeros((n, 2))

    if layout == "circular":
        pos = _circular_layout(network, nsize)
    elif layout is None and spatial and network.is_spatial():
        if show_environment:
            nngt.geometry.plot.plot_shape(network.shape, axis=axis,
                                          show=False)
        nodes = None if restrict_nodes is None else list(restrict_nodes)
        pos = network.get_positions(nodes=nodes)
    elif nonstring_container(layout):
        assert np.shape(layout) == (n, 2), "One position per node is required."
        pos = np.asarray(layout)
    else:
        # random layout over the whole canvas
        pos[:, 0] = size[0]*(np.random.uniform(size=n)-0.5)
        pos[:, 1] = size[1]*(np.random.uniform(size=n)-0.5)

    # make nodes
    nodes = []

    if nonstring_container(c) and not isinstance(c[0], str):
        # make the colorbar for the nodes
        cmap = ncmap

        if colorbar:
            clist = np.unique(c, axis=0) if ncolor == "group" else None
            cnorm = None

            if ncolor.startswith("group"):
                cmap = _discrete_cmap(len(nticks), ncmap, clist=clist)
                cnorm = Normalize(nticks[0]-0.5, nticks[-1] + 0.5)
            else:
                cnorm = Normalize(np.min(c), np.max(c))

            sm = plt.cm.ScalarMappable(cmap=cmap, norm=cnorm)
            c = cnorm(c)

            if ncolor.startswith("group"):
                sm.set_array(nticks)
            else:
                sm.set_array(c)

            plt.subplots_adjust(right=0.95)
            divider = make_axes_locatable(axis)
            cax = divider.append_axes("right", size="5%", pad=0.05)

            if ncolor.startswith("group"):
                cb = plt.colorbar(sm, ticks=nticks, cax=cax, shrink=0.8)
                cb.set_ticklabels(ntickslabels)
                if nlabel:
                    cb.set_label(nlabel)
            else:
                cb = plt.colorbar(sm, cax=cax, shrink=0.8)

            if cb_label is not None:
                cb.ax.set_ylabel(cb_label)
        else:
            # normalize color values to [0, 1] before mapping
            cmin, cmax = np.min(c), np.max(c)
            if cmin != cmax:
                c = (c - cmin)/(cmax - cmin)

        c = cmap(c)
    else:
        if not nonstring_container(c) and not isinstance(c, str):
            minc = np.min(node_color)
            c = np.array(
                [ncmap((node_color - minc)/(np.max(node_color) - minc))]*n)

    # plot nodes
    if simple_nodes:
        if nonstring_container(nshape):
            # matplotlib scatter does not support marker arrays
            if isinstance(nshape[0], nngt.Group):
                for g in nshape:
                    ids = g.ids if restrict_nodes is None \
                          else list(set(g.ids).intersection(restrict_nodes))
                    axis.scatter(pos[ids, 0], pos[ids, 1], color=c[ids],
                                 s=0.5*np.array(nsize)[ids],
                                 marker=markers[ids[0]], zorder=2,
                                 edgecolors=nborder_color,
                                 linewidths=nborder_width, alpha=nalpha)
            else:
                ids = range(network.node_nb()) if restrict_nodes is None \
                      else restrict_nodes
                for i in ids:
                    axis.plot(
                        pos[i, 0], pos[i, 1], color=c[i], ms=0.5*nsize[i],
                        marker=nshape[i], ls="", zorder=2,
                        mec=nborder_color[i], mew=nborder_width, alpha=nalpha)
        else:
            axis.scatter(pos[:, 0], pos[:, 1], color=c, s=0.5*np.array(nsize),
                         marker=nshape, zorder=2, edgecolor=nborder_color,
                         linewidths=nborder_width, alpha=nalpha)
    else:
        axis.set_aspect(1.)

        # full-detail nodes: one PathPatch per node so sizes stay consistent
        # when zooming
        if network.is_network():
            for group in network.population.values():
                idx = group.ids if restrict_nodes is None \
                      else list(set(restrict_nodes).intersection(group.ids))
                for i, fc in zip(idx, c[idx]):
                    m = MarkerStyle(markers[i]).get_path()
                    transform = Affine2D().scale(
                        0.5*nsize[i]).translate(pos[i][0], pos[i][1])
                    patch = PathPatch(m.transformed(transform), facecolor=fc,
                                      edgecolor=nborder_color[i], alpha=nalpha)
                    nodes.append(patch)
        else:
            for i, ci in enumerate(c):
                m = MarkerStyle(markers[i]).get_path()
                transform = Affine2D().scale(0.5*nsize[i]).translate(
                    pos[i][0], pos[i][1])
                patch = PathPatch(m.transformed(transform), facecolor=ci,
                                  edgecolor=nborder_color[i], alpha=nalpha)
                nodes.append(patch)

        nodes = PatchCollection(nodes, match_original=True, alpha=nalpha)
        nodes.set_zorder(2)
        axis.add_collection(nodes)

    if not show_environment or not spatial or not network.is_spatial():
        _set_ax_lim(axis, pos[:, 0], pos[:, 1], xlims, ylims)

    # use quiver to draw the edges
    if e and decimate_connections != -1:
        avg_size = np.average(nsize)
        arr_style = ArrowStyle.Simple(head_length=0.15*avg_size,
                                      head_width=0.1*avg_size,
                                      tail_width=0.05*avg_size)
        arrows = []

        if group_based:
            for src_name, src_group in network.population.items():
                for tgt_name, tgt_group in network.population.items():
                    s_ids = src_group.ids
                    if restrict_sources is not None:
                        s_ids = list(set(restrict_sources).intersection(s_ids))
                    t_ids = tgt_group.ids
                    if restrict_targets is not None:
                        t_ids = list(set(restrict_targets).intersection(t_ids))
                    if t_ids and s_ids:
                        s_min, s_max = np.min(s_ids), np.max(s_ids) + 1
                        t_min, t_max = np.min(t_ids), np.max(t_ids) + 1
                        edges = np.array(
                            adj_mat[s_min:s_max, t_min:t_max].nonzero(),
                            dtype=int)
                        edges[0, :] += s_min
                        edges[1, :] += t_min
                        if nonstring_container(esize):
                            keep = (esize > 0)
                            edges = edges[:, keep]
                            esize = esize[keep]
                        if decimate_connections > 1:
                            edges = edges[:, ::decimate_connections]
                            if nonstring_container(esize):
                                esize = esize[::decimate_connections]
                        # plot
                        ec = default_ecmap(ecolor[(src_name, tgt_name)])
                        if fast:
                            dl = 0.5*np.max(nsize)
                            arrow_x = pos[edges[1], 0] - pos[edges[0], 0]
                            arrow_x -= np.sign(arrow_x) * dl
                            arrow_y = pos[edges[1], 1] - pos[edges[0], 1]
                            # fix: shorten the y-component (previously
                            # subtracted from arrow_x a second time)
                            arrow_y -= np.sign(arrow_y) * dl
                            axis.quiver(
                                pos[edges[0], 0], pos[edges[0], 1], arrow_x,
                                arrow_y, scale_units='xy', angles='xy',
                                scale=1, alpha=ealpha, width=1.5e-3,
                                linewidths=0.5*esize, edgecolors=ec, zorder=1)
                        else:
                            for s, t in zip(edges[0], edges[1]):
                                xs, ys = pos[s, 0], pos[s, 1]
                                xt, yt = pos[t, 0], pos[t, 1]
                                dl = 0.5*nsize[t]
                                dx = xt-xs
                                dx -= np.sign(dx) * dl
                                dy = yt-ys
                                dy -= np.sign(dy) * dl
                                if curved_edges:
                                    arrow = FancyArrowPatch(
                                        posA=(xs, ys), posB=(xt, yt),
                                        arrowstyle=arr_style,
                                        connectionstyle='arc3,rad=0.1',
                                        alpha=ealpha, fc=ec, lw=0.5)
                                    axis.add_patch(arrow)
                                else:
                                    arrows.append(FancyArrow(
                                        xs, ys, dx, dy, width=0.3*avg_size,
                                        head_length=0.7*avg_size,
                                        head_width=0.7*avg_size,
                                        length_includes_head=True,
                                        alpha=ealpha, fc=ec, lw=0.5))
        else:
            if e and decimate_connections != -1:
                # keep only large edges
                if nonstring_container(esize):
                    keep = (esize > 0)
                    edges = edges[keep]
                    if nonstring_container(ecolor):
                        ecolor = ecolor[keep]
                    esize = esize[keep]

                if decimate_connections > 1:
                    edges = edges[::decimate_connections]
                    if nonstring_container(esize):
                        esize = esize[::decimate_connections]
                    if nonstring_container(ecolor):
                        ecolor = ecolor[::decimate_connections]

                # keep only desired edges
                if None not in (restrict_sources, restrict_targets):
                    new_edges = []
                    for edge in edges:
                        s, t = edge
                        if s in restrict_sources and t in restrict_targets:
                            new_edges.append(edge)
                    edges = np.array(new_edges, dtype=int)

                    # remap node ids to their rank among the restricted nodes
                    if restrict_nodes is not None:
                        nodes = list(restrict_nodes)
                        nodes.sort()
                        for i, node in enumerate(nodes):
                            edges[edges == node] = i
                elif restrict_sources is not None:
                    new_edges = []
                    for edge in edges:
                        s, _ = edge
                        if s in restrict_sources:
                            new_edges.append(edge)
                    edges = np.array(new_edges, dtype=int)
                elif restrict_targets is not None:
                    new_edges = []
                    for edge in edges:
                        _, t = edge
                        if t in restrict_targets:
                            new_edges.append(edge)
                    edges = np.array(new_edges, dtype=int)

                if isinstance(ecolor, str):
                    ecolor = [ecolor for i in range(0, e, decimate_connections)]

                if len(edges) and fast:
                    dl = 0.5*np.max(nsize) if not simple_nodes else 0.
                    arrow_x = pos[edges[:, 1], 0] - pos[edges[:, 0], 0]
                    arrow_x -= np.sign(arrow_x) * dl
                    arrow_y = pos[edges[:, 1], 1] - pos[edges[:, 0], 1]
                    # fix: shorten the y-component (previously subtracted
                    # from arrow_x a second time)
                    arrow_y -= np.sign(arrow_y) * dl
                    axis.quiver(pos[edges[:, 0], 0], pos[edges[:, 0], 1],
                                arrow_x, arrow_y, scale_units='xy',
                                angles='xy', scale=1, alpha=ealpha,
                                width=1.5e-3, linewidths=0.5*esize,
                                ec=ecolor, fc=ecolor, zorder=1)
                elif len(edges):
                    for i, (s, t) in enumerate(edges):
                        xs, ys = pos[s, 0], pos[s, 1]
                        xt, yt = pos[t, 0], pos[t, 1]

                        if curved_edges:
                            arrow = FancyArrowPatch(
                                posA=(xs, ys), posB=(xt, yt),
                                arrowstyle=arr_style,
                                connectionstyle='arc3,rad=0.1',
                                alpha=ealpha, fc=ecolor[i], lw=0.5)
                            axis.add_patch(arrow)
                        else:
                            # shorten arrows so they stop at the node border
                            dl = 0.5*nsize[t]
                            dx = xt-xs
                            dx -= np.sign(dx) * dl
                            dy = yt-ys
                            dy -= np.sign(dy) * dl
                            arrows.append(FancyArrow(
                                xs, ys, dx, dy, width=0.3*avg_size,
                                head_length=0.7*avg_size,
                                head_width=0.7*avg_size,
                                length_includes_head=True, alpha=ealpha,
                                fc=ecolor[i], lw=0.5))

        if not fast:
            arrows = PatchCollection(arrows, match_original=True, alpha=ealpha)
            arrows.set_zorder(1)
            axis.add_collection(arrows)

    if kwargs.get('tight', True):
        plt.tight_layout()
        plt.subplots_adjust(
            hspace=0., wspace=0., left=0., right=0.95 if colorbar else 1.,
            top=1., bottom=0.)

    if show:
        plt.show()
def hive_plot(network, radial, axes=None, axes_bins=None, axes_range=None,
              axes_angles=None, axes_labels=None, axes_units=None,
              intra_connections=True, highlight_nodes=None,
              highlight_edges=None, nsize=None, esize=None, max_nsize=10,
              max_esize=1, axes_colors=None, edge_colors=None, edge_alpha=0.05,
              nborder_color="k", nborder_width=0.2, show_names=True,
              show_circles=False, axis=None, tight=True, show=False):
    '''
    Draw a hive plot of the graph.

    Note
    ----
    For directed networks, the direction of intra-axis connections is
    counter-clockwise.
    For inter-axes connections, the default edge color is closest to the color
    of the source group (i.e. from a red group to a blue group, edge color will
    be a reddish violet, while from blue to red, it will be a blueish violet).

    Parameters
    ----------
    network : :class:`~nngt.Graph`
        Graph to plot.
    radial : str, list of str or array-like
        Values that will be used to place the nodes on the axes. Either one
        identical property is used for all axes (traditional hive plot) or
        one radial coordinate per axis is used (custom hive plot).
        If radial is a string or a list of strings, then these must correspond
        to the names of node attributes stored in the graph.
    axes : str, or list of str, optional (default: one per radial coordinate)
        Name of the attribute(s) that will be used to make each of the axes
        (i.e. each group of nodes).
        This can be either "groups" if the graph has a structure or is a
        :class:`~nngt.Network`, a list of (Meta)Group names, or any (list of)
        node attribute(s).
        If a single node attribute is used, `axes_bins` must be provided to
        make one axis for each range of values.
        If there are multiple radial coordinates, then leaving `axes` blanck
        will plot all nodes on each of the axes (one per radial coordinate).
    axes_bins : int or array-like, optional (default: all nodes on each axis)
        Required if there is a single radial coordinate and a single axis
        entry: provides the bins that will be used to separate the nodes
        into groups (one per axis). For N axes, there must therefore be N + 1
        entries in `axes_bins`, or `axis_bins` must be equal to N, in which
        case the nodes are separated into N evenly sized bins.
    axes_units : str, optional
        Units used to scale the axes. Either "native" to have them scaled
        between the minimal and maximal radial coordinates among all axes,
        "rank", to use the min and max ranks of the nodes on all axes, or
        "normed", to have each axis go from zero (minimal local radial
        coordinate) to one (maximal local radial coordinate).
        "native" is the default if there is a single radial coordinate,
        "normed" is the default for multiple coordinates.
    axes_angles : list of angles, optional (default: automatic)
        Angles for each of the axes, by increasing degree. If
        `intra_connections` is True, then angles of duplicate axes must be
        adjacent, e.g. ``[a1, a1bis, a2, a2bis, a3, a3bis]``.
    axes_labels : str or list of str, optional
        Label of each axis. For binned axes, it can be automatically formatted
        via the three entries ``{name}``, ``{start}``, ``{stop}``.
        E.g. "{name} in [{start}, {stop}]" would give "CC in [0, 0.2]" for
        a first axis and "CC in [0.2, 0.4]" for a second axis.
    intra_connections : bool, optional (default: True)
        Show connections between nodes belonging to the same axis. If true,
        then each axis is duplicated to display intra-axis connections.
    highlight_nodes : list of nodes, optional (default: all nodes)
        Highlight a subset of nodes and their connections, all other nodes
        and connections will be gray.
    highlight_edges : list of edges, optional (default: all edges)
        Highlight a subset of edges; all other connections will be gray.
    nsize : float, str, or array-like, optional (default: automatic)
        Size of the nodes on the axes. Either a fixed size, the name of a
        node attribute, or a list of user-defined values.
    esize : float or str, optional (default: 1)
        Size of the edges. Either a fixed size or the name of an edge
        attribute.
    max_nsize : float, optional (default: 10)
        Maximum node size if `nsize` is an attribute or a list of
        user-defined values.
    max_esize : float, optional (default: 1)
        Maximum edge size if `esize` is an attribute.
    axes_colors : valid matplotlib color/colormap, optional (default: Set1)
        Color associated to each axis.
    nborder_color : matplotlib color, optional (default: "k")
        Color of the node's border.
        or floats in [0, 1] defining the position in the palette.
    nborder_width : float, optional (default: 0.2)
        Width of the border.
    edge_colors : valid matplotlib color/colormap, optional (default: auto)
        Color of the edges. By default it is the intermediate color between
        two axes colors. To provide custom colors, they must be provided as
        a dictionnary of axes edges ``{(0, 0): "r", (0, 1): "g", (1, 0): "b"}``
        with default color being black.
    edge_alpha : float, optional (default: 0.05)
        Edge opacity.
    show_names : bool, optional (default: True)
        Show axes names and properties.
    show_circles : bool, optional (default: False)
        Show the circles associated to the maximum value of each axis.
    axis : matplotlib axis, optional (default: create new axis)
        Axis on which the network will be plotted.
    tight : bool, optional (default: True)
        Set figure layout to tight (set to False if plotting multiple axes on
        a single figure).
    show : bool, optional (default: True)
        Display the plot immediately.
    '''
    import matplotlib.pyplot as plt

    # get number of axes and radial coordinates
    num_axes, num_radial = _get_axes_radial_coord(
        radial, axes, axes_bins, network)

    # get axes names, associated nodes, and radial values
    ax_names, ax_nodes, ax_radco = _get_axes_nodes(
        network, radial, axes, axes_bins, num_axes, num_radial)

    # get highlighted nodes and edges
    if highlight_nodes:
        highlight_nodes = set(highlight_nodes)
    else:
        highlight_nodes = set()

    if highlight_edges is not None:
        highlight_edges = {tuple(e) for e in highlight_edges}

    # get units, maximum values for the axes, renormalize radial values
    if axes_units is None:
        axes_units = "normed" if num_radial > 1 else "native"

    radial_values = _get_radial_values(ax_radco, axes_units, network)

    # compute the angles
    angles = None

    if axes_angles is None:
        dtheta = 2 * np.pi / num_axes
        if intra_connections:
            # duplicated axes: two slightly offset angles per axis
            angles = []
            for i in range(num_axes):
                angles.extend(((i - 0.125)*dtheta, (i + 0.125)*dtheta))
        else:
            angles = [i*dtheta for i in range(num_axes)]
    else:
        # fix: the parameter is named `axes_angles`; the previous code
        # referenced an undefined name `ax_angles` (NameError)
        angles = [a*np.pi/180 for a in axes_angles]

    # renormalize the sizes
    nsize = _get_size(nsize, max_nsize, ax_nodes, network)

    nedges = network.edge_nb()
    esize = np.ones(nedges) if esize is None else network.edge_attributes[esize]
    esize *= max_esize / esize.max()
    esize = {tuple(e): s for e, s in zip(network.edges_array, esize)}

    # get the colors
    ncolors, ecolors = _get_colors(axes_colors, edge_colors, angles, num_axes,
                                   intra_connections, network)

    # make the figure
    if axis is None:
        _, axis = plt.subplots()

    # plot the nodes and axes
    node_pos = []
    max_radii = []

    for i, (nn, rr) in enumerate(zip(ax_nodes, radial_values)):
        if len(nn):
            # max radii
            rax = np.array([RMIN, rr[nn].max()])
            max_radii.extend([rax[-1]]*(1 + intra_connections))

            # plot max radii
            if show_circles:
                aa = np.arange(0, 2*np.pi, 0.02)
                xx = rax[-1]*np.cos(aa)
                yy = rax[-1]*np.sin(aa)
                axis.plot(xx, yy, color="grey", alpha=0.2, zorder=1)

            # compute angles
            aa = [angles[2*i] if intra_connections else angles[i]]

            if intra_connections:
                aa += [angles[2*i+1]]

            for j, a in enumerate(aa):
                # plot axes lines
                lw = 1 if j % 2 else 2
                axis.plot(rax*np.cos(a), rax*np.sin(a), color="grey", lw=lw,
                          zorder=1)

                # compute node positions
                xx = rr*np.cos(a)
                yy = rr*np.sin(a)

                node_pos.append(np.array([xx, yy]).T)

                if highlight_nodes:
                    # non-highlighted nodes are grayed out
                    greys = list(set(nn).difference(highlight_nodes))
                    _plot_nodes(greys, nsize, xx, yy, "grey",
                                nborder_width, nborder_color, axis, zorder=3)

                hlght = (nn if not highlight_nodes
                         else list(highlight_nodes.intersection(nn)))

                _plot_nodes(hlght, nsize, xx, yy, ncolors[i],
                            nborder_width, nborder_color, axis, zorder=4)
        else:
            # empty axis: keep placeholders so indices stay aligned
            node_pos.extend([[]]*(1 + intra_connections))
            max_radii.extend([RMIN]*(1 + intra_connections))

    # plot the edges
    xs, ys = [], []

    for i, n1 in enumerate(ax_nodes):
        # NOTE(review): `targets` is computed but the inner loop iterates over
        # all of `ax_nodes`; for undirected graphs this looks like it may draw
        # each inter-axes connection twice -- confirm intended behavior.
        targets = ax_nodes if network.is_directed() else ax_nodes[i:]

        for j, n2 in enumerate(ax_nodes):
            # ignore i = j if intra_connections is True
            if i == j and not intra_connections:
                continue

            # find which axes should be used
            idx_s, idx_t = _get_ax_angles(
                angles, i, j, intra_connections)

            # get the edges
            edges = network.get_edges(source_node=n1, target_node=n2)

            if len(edges):
                color = ecolors[(i, j)]

                paths_greys = []
                paths_hghlt = []

                lw = []

                for (ns, nt) in edges:
                    pstart = node_pos[idx_s][ns]
                    pstop = node_pos[idx_t][nt]

                    contains = True

                    if highlight_edges is not None:
                        contains = (ns, nt) in highlight_edges
                    elif highlight_nodes is not None:
                        contains = \
                            ns in highlight_nodes or nt in highlight_nodes

                    if highlight_edges is None or contains:
                        paths_hghlt.append(_plot_bezier(
                            pstart, pstop, angles[idx_s], angles[idx_t],
                            radial_values[i][ns], radial_values[j][nt], i, j,
                            num_axes, xs, ys))

                        lw.append(esize[(ns, nt)])
                    else:
                        paths_greys.append(_plot_bezier(
                            pstart, pstop, angles[idx_s], angles[idx_t],
                            radial_values[i][ns], radial_values[j][nt], i, j,
                            num_axes, xs, ys))

                if paths_greys:
                    pcol = PathCollection(
                        paths_greys, facecolors="none", edgecolors="grey",
                        alpha=0.1*edge_alpha, zorder=1)
                    axis.add_collection(pcol)

                alpha = 0.7 if highlight_nodes else edge_alpha

                pcol = PathCollection(paths_hghlt, facecolors="none", lw=lw,
                                      edgecolors=color, alpha=alpha, zorder=2)
                axis.add_collection(pcol)

    _set_names_lims(ax_names, angles, max_radii, xs, ys, intra_connections,
                    show_names, axis, show_circles)

    axis.set_aspect(1)
    axis.axis('off')

    if tight:
        plt.tight_layout()

    if show:
        plt.show()
def library_draw(network, nsize="total-degree", ncolor="group", nshape="o",
                 nborder_color="k", nborder_width=0.5, esize=1., ecolor="k",
                 ealpha=0.5, max_nsize=5., max_esize=2., curved_edges=False,
                 threshold=0.5, decimate_connections=None, spatial=True,
                 restrict_sources=None, restrict_targets=None,
                 restrict_nodes=None, restrict_edges=None,
                 show_environment=True, size=(600, 600), xlims=None,
                 ylims=None, dpi=75, axis=None, colorbar=False,
                 show_labels=False, layout=None, show=False, **kwargs):
    '''
    Draw a given :class:`~nngt.Graph` using the underlying library's drawing
    functions.

    .. versionadded:: 2.0

    .. warning::
        When using igraph or graph-tool, if you want to use the `axis`
        argument, then you must first switch the matplotlib backend to its
        cairo version using e.g. ``plt.switch_backend("Qt5Cairo")`` if your
        normal backend is Qt5 ("Qt5Agg").

    Parameters
    ----------
    network : :class:`~nngt.Graph` or subclass
        The graph/network to plot.
    nsize : float, array of float or string, optional (default: "total-degree")
        Size of the nodes as a percentage of the canvas length. Otherwise, it
        can be a string that correlates the size to a node attribute among
        "in/out/total-degree", or "betweenness".
    ncolor : float, array of floats or string, optional (default: 0.5)
        Color of the nodes; if a float in [0, 1], position of the color in the
        current palette, otherwise a string that correlates the color to a node
        attribute among "in/out/total-degree", "betweenness" or "group".
    nshape : char, array of chars, or groups, optional (default: "o")
        Shape of the nodes (see `Matplotlib markers <http://matplotlib.org/api/
        markers_api.html?highlight=marker#module-matplotlib.markers>`_).
        When using groups, they must be pairwise disjoint; markers will be
        selected iteratively from the matplotlib default markers.
    nborder_color : char, float or array, optional (default: "k")
        Color of the node's border using predefined `Matplotlib colors
        <http://matplotlib.org/api/colors_api.html?highlight=color
        #module-matplotlib.colors>`_).
        or floats in [0, 1] defining the position in the palette.
    nborder_width : float or array of floats, optional (default: 0.5)
        Width of the border in percent of canvas size.
    esize : float, str, or array of floats, optional (default: 0.5)
        Width of the edges in percent of canvas length. Available string values
        are "betweenness" and "weight".
    ecolor : str, char, float or array, optional (default: "k")
        Edge color. If ecolor="groups", edges color will depend on the source
        and target groups, i.e. only edges from and toward same groups will
        have the same color.
    max_esize : float, optional (default: 5.)
        If a custom property is entered as `esize`, this normalizes the edge
        width between 0. and `max_esize`.
    threshold : float, optional (default: 0.5)
        Size under which edges are not plotted.
    decimate_connections : int, optional (default: keep all connections)
        Plot only one connection every `decimate_connections`.
        Use -1 to hide all edges.
    spatial : bool, optional (default: True)
        If True, use the neurons' positions to draw them.
    restrict_sources : str, group, or list, optional (default: all)
        Only draw edges starting from a restricted set of source nodes.
    restrict_targets : str, group, or list, optional (default: all)
        Only draw edges ending on a restricted set of target nodes.
    restrict_nodes : str, group, or list, optional (default: plot all nodes)
        Only draw a subset of nodes.
    restrict_edges : list of edges, optional (default: all)
        Only draw a subset of edges.
    show_environment : bool, optional (default: True)
        Plot the environment if the graph is spatial.
    size : tuple of ints, optional (default: (600, 600))
        (width, height) tuple for the canvas size (in px).
    dpi : int, optional (default: 75)
        Resolution (dot per inch).
    colorbar : bool, optional (default: False)
        Whether to display a colorbar for the node colors or not.
    axis : matplotlib axis, optional (default: create new axis)
        Axis on which the network will be plotted.
    layout : str, optional (default: library-dependent or spatial positions)
        Name of a standard layout to structure the network. Available layouts
        are: "circular", "spring-block", "random". If no layout is
        provided and the network is spatial, then node positions will be
        used by default.
    show : bool, optional (default: True)
        Display the plot immediately.
    **kwargs : dict
        Optional keyword arguments including `node_cmap` to set the
        nodes colormap (default is "magma" for continuous variables and
        "Set1" for groups) and the boolean `simple_nodes` to make node
        plotting faster.
    '''
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    # backend and axis: graph-tool and igraph can only draw on Cairo-based
    # matplotlib backends, so switch to the Cairo flavor of the current one
    if nngt.get_config("backend") in ("graph-tool", "igraph"):
        mpl_backend = mpl.get_backend()
        if mpl_backend.startswith("Qt4"):
            if mpl_backend != "Qt4Cairo":
                plt.switch_backend("Qt4Cairo")
        elif mpl_backend.startswith("Qt5"):
            if mpl_backend != "Qt5Cairo":
                plt.switch_backend("Qt5Cairo")
        elif mpl_backend.startswith("GTK"):
            if mpl_backend != "GTK3Cairo":
                plt.switch_backend("GTK3Cairo")
        elif mpl_backend != "cairo":
            plt.switch_backend("cairo")
    if axis is None:
        size_inches = (size[0]/float(dpi), size[1]/float(dpi))
        fig, axis = plt.subplots(figsize=size_inches)
    axis.axis('off')
    # default plot: delegate entirely to NNGT's own renderer
    if nngt.get_config("backend") == "nngt":
        draw_network(
            network, nsize=nsize, ncolor=ncolor, nshape=nshape,
            nborder_color=nborder_color, nborder_width=nborder_width,
            esize=esize, ecolor=ecolor, ealpha=ealpha, max_nsize=max_nsize,
            max_esize=max_esize, curved_edges=curved_edges,
            threshold=threshold, decimate_connections=decimate_connections,
            spatial=spatial, restrict_nodes=restrict_nodes,
            show_environment=show_environment, size=size, axis=axis,
            layout=layout, show=show, **kwargs)
        # nothing left to do for the default backend; the original code fell
        # through and needlessly recomputed all properties below, potentially
        # drawing the spatial environment a second time
        return
    # otherwise, prepare data
    restrict_nodes = _convert_to_nodes(restrict_nodes,
                                       "restrict_nodes", network)
    # size and shape
    markers, nsize, esize = _node_edge_shape_size(
        network, nshape, nsize, max_nsize, esize, max_esize, restrict_nodes,
        restrict_edges, size, threshold)
    # node color information
    default_ncmap = (palette_discrete() if not nonstring_container(ncolor) and
                     ncolor == "group" else palette_continuous())
    ncmap = get_cmap(kwargs.get("node_cmap", default_ncmap))
    node_color, nticks, ntickslabels, nlabel = \
        _node_color(network, restrict_nodes, ncolor)
    # edge color
    ecolor = _edge_prop(network, ecolor)
    esize = _edge_prop(network, esize)
    if nonstring_container(esize) and len(esize):
        esize *= max_esize / np.max(esize)
    # environment
    if spatial and network.is_spatial():
        if show_environment:
            nngt.geometry.plot.plot_shape(network.shape, axis=axis, show=False)
    # do the plot
    if nngt.get_config("backend") == "graph-tool":
        from graph_tool.draw import (graph_draw, sfdp_layout, random_layout)
        graph = network.graph
        # rescale NNGT units to graph-tool drawing units
        if nonstring_container(nsize):
            nsize *= 0.05
        nborder_width *= 0.1
        esize *= 0.02
        # positions
        pos = None
        if layout is None:
            if isinstance(network, nngt.SpatialGraph) and spatial:
                xy = network.get_positions()
                pos = graph.new_vp("vector<double>", vals=xy)
            else:
                weights = (None if not network.is_weighted()
                           else graph.edge_properties['weight'])
                pos = sfdp_layout(graph, eweight=weights)
        elif layout == "random":
            pos = random_layout(graph)
        elif layout == "circular":
            pos = graph.new_vp("vector<double>",
                               vals=_circular_layout(network, nsize))
        elif nonstring_container(layout):
            assert np.shape(layout) == (network.node_nb(), 2), \
                "One position per node in the network is required."
            pos = graph.new_vp("vector<double>", vals=layout)
        else:
            # spring block
            weights = (None if not network.is_weighted()
                       else graph.edge_properties['weight'])
            pos = sfdp_layout(graph, eweight=weights)
        convert_shape = {
            "o": "circle",
            "v": "triangle",
            "^": "triangle",
            "s": "square",
            "p": "pentagon",
            "h": "hexagon",
            "H": "hexagon",
        }
        # unknown markers fall back to "circle"; the original passed a
        # one-argument lambda to defaultdict, which raises TypeError because
        # default_factory is called without arguments
        shape_dict = defaultdict(lambda: "circle", convert_shape)
        vprops = {
            "shape": shape_dict[nshape],
            "fill_color": _to_gt_prop(graph, node_color, ncmap, color=True),
            "color": _to_gt_prop(graph, nborder_color, ncmap, color=True),
            "size": _to_gt_prop(graph, nsize, ncmap),
            "pen_width": _to_gt_prop(graph, nborder_width, ncmap),
        }
        if vprops["fill_color"] is None:
            # graph-tool's default dark-red fill
            vprops["fill_color"] = [0.640625, 0, 0, 0.9]
        eprops = None if network.edge_nb() == 0 else {
            "color": _to_gt_prop(graph, ecolor, palette_continuous(),
                                 ptype='edge', color=True),
            "pen_width": _to_gt_prop(graph, esize, None, ptype='edge'),
        }
        if restrict_edges is not None:
            # hide all edges except the requested ones via an edge filter
            efilt = network.graph.new_ep(
                "bool", vals=np.zeros(network.edge_nb(), dtype=bool))
            eids = [network.edge_id(e) for e in restrict_edges]
            efilt.a[eids] = 1
            network.graph.set_edge_filter(efilt)
        graph_draw(network.graph, pos=pos, vprops=vprops, eprops=eprops,
                   output_size=size, mplfig=axis)
        if restrict_edges is not None:
            # clear edge filter
            network.graph.set_edge_filter(None)
    elif nngt.get_config("backend") == "networkx":
        import networkx as nx
        pos = None
        if layout is None:
            if isinstance(network, nngt.SpatialGraph) and spatial:
                xy = network.get_positions()
                pos = {i: coords for i, coords in enumerate(xy)}
        elif layout == "circular":
            pos = nx.circular_layout(network.graph)
        elif layout == "random":
            pos = nx.random_layout(network.graph)
        elif nonstring_container(layout):
            assert np.shape(layout) == (network.node_nb(), 2), \
                "One position per node in the network is required."
            pos = {i: coords for i, coords in enumerate(layout)}
        else:
            pos = nx.spring_layout(network.graph)
        # normalize sizes compared to igraph
        nsize = _increase_nx_size(nsize)
        nborder_width = _increase_nx_size(nborder_width, 2)
        edges = None if restrict_edges is None else list(restrict_edges)
        nx.draw_networkx(
            network.graph, pos=pos, ax=axis, nodelist=restrict_nodes,
            edgelist=edges, node_size=nsize, node_color=node_color,
            node_shape=nshape, linewidths=nborder_width, edge_color=ecolor,
            edge_cmap=palette_continuous(), cmap=ncmap,
            with_labels=show_labels, width=esize, edgecolors=nborder_color)
    elif nngt.get_config("backend") == "igraph":
        from igraph import Layout, PrecalculatedPalette
        pos = None
        if layout is None:
            if isinstance(network, nngt.SpatialGraph) and spatial:
                xy = network.get_positions()
                pos = Layout(xy)
        elif layout == "circular":
            pos = network.graph.layout_circle()
        elif layout == "random":
            pos = network.graph.layout_random()
        palette = PrecalculatedPalette(ncmap(np.linspace(0, 1, 256)))
        # convert color to igraph-format
        node_color = _to_ig_color(node_color)
        ecolor = _to_ig_color(ecolor)
        convert_shape = {
            "o": "circle",
            "v": "triangle-down",
            "^": "triangle-up",
            "s": "rectangle",
        }
        # same defaultdict fix as in the graph-tool branch above
        shape_dict = defaultdict(lambda: "circle", convert_shape)
        visual_style = {
            "vertex_size": nsize,
            "vertex_color": node_color,
            "vertex_shape": shape_dict[nshape],
            "edge_width": esize,
            "edge_color": ecolor,
            "layout": pos,
            "palette": palette,
        }
        graph = network.graph
        if restrict_edges is not None:
            eids = [network.edge_id(e) for e in restrict_edges]
            graph = network.graph.subgraph_edges(eids, delete_vertices=False)
        graph_artist = GraphArtist(graph, axis, **visual_style)
        axis.artists.append(graph_artist)
    if "title" in kwargs:
        axis.set_title(kwargs["title"])
    if show:
        plt.show()
def chord_diagram(network, weights=True, names=None, order=None, width=0.1,
                  pad=2., gap=0.03, chordwidth=0.7, axis=None, colors=None,
                  cmap=None, alpha=0.7, use_gradient=False, show=False,
                  **kwargs):
    """
    Plot a chord diagram.

    Parameters
    ----------
    network : a :class:`nngt.Graph` object
        Network used to plot the chord diagram.
    weights : bool or str, optional (default: 'weight' attribute)
        Weights used to plot the connections.
    names : str or list of str, optional (default: no names)
        Names of the nodes that will be displayed, either a node attribute
        or a custom list (must be ordered following the nodes' indices).
    order : list, optional (default: order of the matrix entries)
        Order in which the arcs should be placed around the trigonometric
        circle.
    width : float, optional (default: 0.1)
        Width/thickness of the ideogram arc.
    pad : float, optional (default: 2)
        Distance between two neighboring ideogram arcs. Unit: degree.
    gap : float, optional (default: 0.03)
        Distance between the arc and the beginning of the cord.
    chordwidth : float, optional (default: 0.7)
        Position of the control points for the chords, controlling their shape.
    axis : matplotlib axis, optional (default: new axis)
        Matplotlib axis where the plot should be drawn.
    colors : list, optional (default: from `cmap`)
        List of user defined colors or floats.
    cmap : str or colormap object (default: viridis)
        Colormap to use.
    alpha : float in [0, 1], optional (default: 0.7)
        Opacity of the chord diagram.
    use_gradient : bool, optional (default: False)
        Whether a gradient should be use so that chord extremities have the
        same color as the arc they belong to.
    **kwargs : keyword arguments
        Available kwargs are "fontsize" and "sort" (either "size" or
        "distance"), "zero_entry_size" (in degrees, default: 0.5),
        "rotate_names" (a bool or list of bools) to rotate (some of) the
        names by 90 degrees.
    """
    # `weights is True` means "use the default 'weight' edge attribute"
    weight_attr = 'weight' if weights is True else weights
    # a string `names` refers to a node attribute holding the labels
    node_names = names
    if isinstance(names, str):
        node_names = network.node_attributes[names]
    adjacency = network.adjacency_matrix(weights=weight_attr)
    # delegate the actual drawing to the generic chord-diagram helper
    return _chord_diag(
        adjacency, node_names, order=order, width=width, pad=pad, gap=gap,
        chordwidth=chordwidth, ax=axis, colors=colors, cmap=cmap, alpha=alpha,
        use_gradient=use_gradient, show=show, **kwargs)
# ----- #
# Tools #
# ----- #
def _node_edge_shape_size(network, nshape, nsize, max_nsize, esize, max_esize,
                          restrict_nodes, edges, size, threshold,
                          simple_nodes=False):
    '''
    Returns the shape and size of the nodes and edges.

    Returns (markers, nsize, esize) where `markers` is a per-node list of
    matplotlib marker characters (or the original `nshape` array), `nsize`
    a per-node size array scaled to 1% of the canvas width, and `esize` a
    per-edge width array scaled to 0.5% of the canvas width (empty array if
    there are no edges to plot).

    NOTE(review): `nsize *= ...` and `esize *= ...` mutate the caller's
    arrays in place when arrays are passed in — presumably intentional here
    since callers pass freshly-built values; verify before reusing.
    '''
    # number of nodes/edges that will actually be plotted
    n = network.node_nb() if restrict_nodes is None else len(restrict_nodes)
    e = len(edges) if edges is not None else network.edge_nb()
    # markers
    markers = nshape
    if nonstring_container(nshape):
        if isinstance(nshape[0], nngt.Group):
            # check disjunction: each node may belong to at most one group
            for i, g in enumerate(nshape):
                for j in range(i + 1, len(nshape)):
                    if not set(g.ids).isdisjoint(nshape[j].ids):
                        raise ValueError("Groups passed to `nshape` "
                                         "must be disjoint.")
            # assign one filled marker per group, cycling if necessary
            mm = cycle(MarkerStyle.filled_markers)
            shapes = np.full(network.node_nb(), "", dtype=object)
            for g, m in zip(nshape, mm):
                shapes[g.ids] = m
            markers = list(shapes)
        elif len(nshape) != network.node_nb():
            raise ValueError("When passing an array of markers to "
                             "`nshape`, one entry per node in the "
                             "network must be provided.")
    else:
        # single marker character: replicate it for every node
        markers = [nshape for _ in range(network.node_nb())]
    # size: resolve string specifications (degree/betweenness/...) and
    # normalize so the largest node has size `max_nsize`
    if isinstance(nsize, str):
        if e:
            nsize = _node_size(network, restrict_nodes, nsize)
            nsize *= max_nsize / np.max(nsize)
        else:
            # no edges: property-based sizing is meaningless, use uniform
            nsize = np.ones(n, dtype=float)
    elif isinstance(nsize, (float, int, np.number)):
        nsize = np.full(n, nsize, dtype=float)
    elif nonstring_container(nsize):
        nsize *= max_nsize / np.max(nsize)
    # convert from percent of canvas to pixels
    nsize *= 0.01 * size[0]
    if e:
        if isinstance(esize, str):
            esize = _edge_size(network, edges, esize)
        esize *= max_esize
        # edges thinner than `threshold` are hidden entirely
        esize[esize < threshold] = 0.
        esize *= 0.005 * size[0]  # border on each side (so 0.5 %)
    else:
        esize = np.array([])
    return markers, nsize, esize
def _set_ax_lim(ax, xdata, ydata, xlims, ylims):
if xlims is not None:
ax.set_xlim(*xlims)
else:
x_min, x_max = np.min(xdata), np.max(xdata)
width = x_max - x_min
ax.set_xlim(x_min - 0.05*width, x_max + 0.05*width)
if ylims is not None:
ax.set_ylim(*ylims)
else:
y_min, y_max = np.min(ydata), np.max(ydata)
height = y_max - y_min
ax.set_ylim(y_min - 0.05*height, y_max + 0.05*height)
def _node_size(network, restrict_nodes, nsize):
    '''
    Compute per-node sizes from the node property named by `nsize`
    ("in/out/total-degree", "*-strength", "betweenness", "clustering",
    or any entry of `nngt.analyze_graph`).

    Returns a float array normalized to [0, 1] (unless all values are zero),
    restricted to `restrict_nodes` when given.
    '''
    restrict_nodes = None if restrict_nodes is None else list(restrict_nodes)
    n = network.node_nb() if restrict_nodes is None else len(restrict_nodes)
    size = np.ones(n, dtype=float)
    if "degree" in nsize:
        # e.g. "total-degree" -> "total"
        deg_type = nsize[:nsize.index("-")]
        size = network.get_degrees(deg_type,
                                   nodes=restrict_nodes).astype(float)
        # give zero-degree nodes a small visible size
        if np.isclose(size.min(), 0):
            size[np.isclose(size, 0)] = 0.5
        # compress large dynamic ranges so hubs do not dwarf everything
        if size.max() > 15*size.min():
            size = np.power(size, 0.4)
    elif "strength" in nsize:
        deg_type = nsize[:nsize.index("-")]
        # weighted degree
        size = network.get_degrees(deg_type, weights='weight',
                                   nodes=restrict_nodes)
        if np.isclose(size.min(), 0):
            size[np.isclose(size, 0)] = 0.5
        if size.max() > 15*size.min():
            size = np.power(size, 0.4)
    elif nsize == "betweenness":
        betw = None
        if restrict_nodes is None:
            betw = network.get_betweenness("node").astype(float)
        else:
            betw = network.get_betweenness(
                "node").astype(float)[restrict_nodes]
        # NOTE(review): betweenness-based sizes are applied only for weakly
        # connected graphs — presumably because betweenness is degenerate
        # otherwise; confirm intent
        if network.is_connected("weak") == 1:
            size *= betw
            if size.max() > 15*size.min():
                # log-compress, mapping zeros to the smallest nonzero value
                min_size = size[size!=0].min()
                size[size == 0.] = min_size
                size = np.log(size)
                if size.min()<0:
                    size -= 1.1*size.min()
    elif nsize == "clustering":
        size *= nngt.analysis.local_clustering(network, nodes=restrict_nodes)
    elif nsize in nngt.analyze_graph:
        # generic per-node metric from the analysis toolbox
        if restrict_nodes is None:
            size *= nngt.analyze_graph[nsize](network)
        else:
            size *= nngt.analyze_graph[nsize](network)[restrict_nodes]
    # normalize to [0, 1] unless everything is zero
    if np.any(size):
        size /= size.max()
    return size.astype(float)
def _edge_size(network, edges, esize):
    '''
    Compute per-edge sizes from the specification `esize` ("betweenness",
    "weight", or an array whose max is used for normalization).

    Returns a float array of length `len(edges)` (or `network.edge_nb()` when
    `edges` is None), normalized by the maximum over the whole network.
    '''
    num_edges = len(edges) if edges is not None else network.edge_nb()
    size = np.repeat(1., num_edges)
    if num_edges:
        max_size = 1.
        if nonstring_container(esize):
            max_size = np.max(esize)
        elif esize == "betweenness":
            betw = network.get_betweenness("edge")
            max_size = np.max(betw)
            # restrict to the requested edges; the original referenced an
            # undefined `restrict_nodes` here, raising NameError whenever
            # esize == "betweenness"
            if edges is None:
                size = betw
            else:
                size = betw[[network.edge_id(e) for e in edges]]
        elif esize == "weight":
            size = network.get_weights(edges=edges)
            # normalize against the global maximum weight, not the subset
            max_size = np.max(network.get_weights())
        if np.any(size):
            size /= max_size
    return size
def _node_color(network, restrict_nodes, ncolor):
    '''
    Return an array of colors, a set of ticks, and a label for the colorbar
    of the nodes (if necessary).

    `ncolor` may be a float (uniform palette position), a named matplotlib
    color or hex code, a per-node metric name ("*-degree", "betweenness",
    "clustering", a node attribute, or an `nngt.analyze_graph` entry),
    "group"/"groups" for structure-based coloring, or a custom array.

    Returns (color, nticks, ntickslabels, nlabel); the tick entries are None
    when no colorbar is needed.
    '''
    color = ncolor
    nticks = None
    ntickslabels = None
    nlabel = ""
    n = network.node_nb() if restrict_nodes is None else len(restrict_nodes)
    if restrict_nodes is not None:
        restrict_nodes = set(restrict_nodes)
    if isinstance(ncolor, float):
        # single palette position applied to every node
        color = np.repeat(ncolor, n)
    elif isinstance(ncolor, str):
        if ncolor == "group" or ncolor == "groups":
            color = np.zeros(n)
            if network.structure is not None:
                # one evenly-spaced palette position per group
                l = len(network.structure)
                c = np.linspace(0, 1, l)
                tmp = 0
                for i, group in enumerate(network.structure.values()):
                    if restrict_nodes is None:
                        color[group.ids] = c[i]
                    else:
                        # NOTE(review): assumes plotted nodes are ordered by
                        # group (colors are assigned to consecutive slots) —
                        # confirm against the callers' node ordering
                        ids = restrict_nodes.intersection(group.ids)
                        for j in range(len(ids)):
                            color[tmp + j] = c[i]
                        tmp += len(ids)
                nlabel = "Neuron groups"
                nticks = list(range(len(network.structure)))
                ntickslabels = [s.replace("_", " ")
                                for s in network.structure.keys()]
        else:
            # metric-based coloring: resolve `ncolor` to per-node values
            values = None
            if "degree" in ncolor:
                dtype = ncolor[:ncolor.find("-")]
                values = network.get_degrees(dtype, nodes=restrict_nodes)
            elif ncolor == "betweenness":
                if restrict_nodes is None:
                    values = network.get_betweenness("node")
                else:
                    values = network.get_betweenness(
                        "node")[list(restrict_nodes)]
            elif ncolor in network.node_attributes:
                values = network.get_node_attributes(
                    name=ncolor, nodes=restrict_nodes)
            elif ncolor == "clustering" :
                values = nngt.analysis.local_clustering(
                    network, nodes=restrict_nodes)
            elif ncolor in nngt.analyze_graph:
                if restrict_nodes is None:
                    values = nngt.analyze_graph[ncolor](network)
                else:
                    values = nngt.analyze_graph[ncolor](
                        network)[list(restrict_nodes)]
            elif ncolor in ColorConverter.colors or ncolor.startswith("#"):
                # plain matplotlib color name or hex code: uniform color
                color = np.repeat(ncolor, n)
            else:
                raise RuntimeError("Invalid `ncolor`: {}.".format(ncolor))
            if values is not None:
                vmin, vmax = np.min(values), np.max(values)
                #~ color = (values - vmin) / (vmax - vmin)
                # raw values are passed on; normalization is left to the
                # colormap machinery downstream
                color = values
                nlabel = "Node " + ncolor.replace("_", " ")
                # few distinct values: one tick per value, else 10 even ticks
                setval = set(values)
                if len(setval) <= 10:
                    nticks = list(setval)
                    nticks.sort()
                    ntickslabels = nticks
                else:
                    nticks = np.linspace(vmin, vmax, 10)
                    ntickslabels = nticks
    else:
        # custom per-node color array supplied by the caller
        nlabel = "Custom node colors"
        uniques = np.unique(ncolor, axis=0)
        if len(uniques) <= 10:
            nticks = uniques
        else:
            nticks = np.linspace(np.min(ncolor), np.max(ncolor), 10)
        ntickslabels = nticks
    return color, nticks, ntickslabels, nlabel
def _edge_prop(network, value):
    '''
    Resolve an edge-property specification.

    If `value` is a string that is not a matplotlib color name, return the
    per-edge values of the attribute it names (or the edge betweenness for
    "betweenness"); otherwise return `value` unchanged.

    Raises RuntimeError for an unrecognized string.
    '''
    prop = value
    if isinstance(value, str) and value not in ColorConverter.colors:
        if value in network.edge_attributes:
            # fixed: the original assigned the attribute values to an unused
            # local `color` and returned the attribute *name* instead
            prop = network.edge_attributes[value]
        elif value == "betweenness":
            prop = network.get_betweenness("edge")
        else:
            raise RuntimeError("Invalid `value`: {}.".format(value))
    return prop
def _discrete_cmap(N, base_cmap=None, clist=None):
    '''
    Create an N-bin discrete colormap from the specified input map.

    Parameters
    ----------
    N : number of values
    base_cmap : str, None, or cmap object
    clist : list of colors
        Explicit colors to use instead of sampling `base_cmap`.

    # Modified from Jake VanderPlas
    # License: BSD-style
    '''
    import matplotlib.pyplot as plt
    # Note that if base_cmap is a string or None, you can simply do
    #    return plt.cm.get_cmap(base_cmap, N)
    # The following works for string, None, or a colormap instance:
    base = plt.cm.get_cmap(base_cmap, N)
    color_list = base(np.linspace(0, 1, N)) if clist is None else clist
    cmap_name = base.name + str(N)
    try:
        return base.from_list(cmap_name, color_list, N)
    except Exception:
        # narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); some colormap classes do not
        # provide `from_list`, so build a ListedColormap directly
        return ListedColormap(color_list, cmap_name, N=N)
def _convert_to_nodes(node_restriction, name, network):
    '''
    Convert a node restriction to a set of node ids.

    `node_restriction` may be None (returned as-is), a list of ids, a group
    name or list of group names (requires a graph with a `structure`), an
    :class:`nngt.Group` or a list of Groups.

    `name` is the argument name, used only in error messages.
    '''
    if nonstring_container(node_restriction):
        if isinstance(node_restriction[0], str):
            assert network.structure is not None, \
                "`" + name + "` can be string only for Network or graph " \
                "with a `structure`."
            ids = set()
            # loop variable renamed: the original reused `name`, shadowing
            # the argument used in the error messages
            for key in node_restriction:
                ids.update(network.structure[key].ids)
            return ids
        elif isinstance(node_restriction[0], nngt.Group):
            ids = set()
            for g in node_restriction:
                ids.update(g.ids)
            return ids
        return set(node_restriction)
    elif isinstance(node_restriction, str):
        # accept any graph with a `structure`, consistent with the
        # list-of-names branch above (the original required a full Network)
        assert network.structure is not None, \
            "`" + name + "` can be string only for Network or graph " \
            "with a `structure`."
        return set(network.structure[node_restriction].ids)
    elif isinstance(node_restriction, nngt.Group):
        return set(node_restriction.ids)
    elif node_restriction is not None:
        raise ValueError(
            "Invalid `" + name + "`: '{}'".format(node_restriction))
    return node_restriction
def _custom_arrows(sources, targets, angle):
    r'''
    Create a curved arrow between `source` and `target` as the combination of
    the arc of a circle and a triangle.

    The initial and final angle $\alpha$ between the source-target line and
    the arrow is linked to the radius of the circle, $r$ and the distance $d$
    between the points:

    .. math:: r = \frac{d}{2 \cdot \tan(\alpha)}

    The beginning and the end of the arc are given through initial and final
    angles, respectively $\theta_1$ and $\theta_2$, which are given with
    respect to the y-axis; This leads to $\alpha = 0.5(\theta_1 - \theta_2)$.

    .. note::
        Unimplemented placeholder — the commented-out code below sketches the
        intended matplotlib implementation (arc + triangular arrow head).
        The docstring is now a raw string: the original contained `\f`, `\t`
        and `\a`, which Python interprets as escape sequences.
    '''
    # compute the distances between the points
    pass
    #~ # compute the radius and the position of the center of the circle
    #~ #========Line
    #~ arc = Arc([centX,centY],radius,radius,angle=angle_,
    #~       theta1=0,theta2=theta2_,capstyle='round',linestyle='-',lw=10,color=color_)
    #~ ax.add_patch(arc)
    #~ #========Create the arrow head
    #~ endX=centX+(radius/2)*np.cos(rad(theta2_+angle_)) #Do trig to determine end position
    #~ endY=centY+(radius/2)*np.sin(rad(theta2_+angle_))
    #~ ax.add_patch(                    #Create triangle as arrow head
        #~ RegularPolygon(
            #~ (endX, endY),            # (x,y)
            #~ 3,                       # number of vertices
            #~ radius/9,                # radius
            #~ rad(angle_+theta2_),     # orientation
            #~ color=color_
        #~ )
    #~ )
def _to_ig_color(color):
    '''
    Convert a color specification to a format understood by igraph.

    Unknown color names become "r, g, b" strings; numeric arrays are rescaled
    to integer palette indices in [0, 255]; lists of names are converted
    element-wise.
    '''
    import igraph as ig
    if isinstance(color, str) and color not in ig.known_colors:
        # "r, g, b" string from the matplotlib color converter
        color = str(ColorConverter.to_rgb(color))[1:-1]
    elif nonstring_container(color) and len(color):
        # need to convert floating point colors to [0, 255] integers
        if is_integer(color[0]) or isinstance(color[0], float):
            vmin = np.min(color)
            vmax = np.max(color)
            vint = vmax - vmin
            if vint > 0:
                color = [int(255 * (v - vmin) / vint) for v in color]
            else:
                # all values identical: map everything to index 0
                color = [0]*len(color)
        else:
            for i, c in enumerate(color):
                # fixed: the original tested/converted the whole list
                # (`color`) instead of the element `c`, so per-element
                # conversion never happened
                if isinstance(c, str) and c not in ig.known_colors:
                    color[i] = str(ColorConverter.to_rgb(c))[1:-1]
    return color
def _increase_nx_size(size, factor=4):
    '''
    Rescale numeric sizes by `factor` so networkx output matches igraph;
    non-numeric specifications (e.g. color strings) pass through unchanged.
    '''
    if isinstance(size, float) or is_integer(size):
        return factor * size
    if nonstring_container(size) and len(size):
        first = size[0]
        if isinstance(first, float) or is_integer(first):
            return factor * np.asarray(size)
    return size
def _to_gt_prop(graph, value, cmap, ptype='node', color=False):
    '''
    Convert a value specification into a graph-tool property map.

    `value` may be a list of color names, a list of rgb(a) tuples, or a list
    of numbers (mapped through `cmap` when `color` is True); scalars are
    returned unchanged.

    `ptype` selects vertex ("node") or edge properties.
    '''
    # choose the property-map factory matching the requested property type
    pmap = (graph.new_vertex_property if ptype == 'node'
            else graph.new_edge_property)
    if nonstring_container(value) and len(value):
        if isinstance(value[0], str):
            if color:
                # custom namedcolors: convert each name to an rgba vector
                return pmap("vector<double>",
                            vals=[ColorConverter.to_rgba(v) for v in value])
            else:
                return pmap("string", vals=value)
        elif nonstring_container(value[0]):
            # direct rgb(a) description
            return pmap("vector<double>", vals=value)
        # numbers
        if color:
            # normalize to [0, 1] before mapping through the colormap
            vmin, vmax = np.min(value), np.max(value)
            normalized = None
            if vmax - vmin > 0:
                normalized = (np.array(value) - vmin) / (vmax - vmin)
            else:
                # NOTE(review): all values identical -> returns None here,
                # so the caller gets no property map; confirm this fallback
                # (e.g. a uniform color) is the intended behavior
                return normalized
            return pmap("vector<double>", vals=[cmap(v) for v in normalized])
        return pmap("double", vals=value)
    # scalar or empty: pass through unchanged
    return value
class GraphArtist(Artist):
    """
    Matplotlib artist class that draws igraph graphs.

    Only Cairo-based backends are supported.

    Adapted from: https://stackoverflow.com/a/36154077/5962321
    """
    def __init__(self, graph, axis, palette=None, *args, **kwds):
        """Constructs a graph artist that draws the given graph within
        the given bounding box.

        `graph` must be an instance of `igraph.Graph`.
        `bbox` must either be an instance of `igraph.drawing.BoundingBox`
        or a 4-tuple (`left`, `top`, `width`, `height`). The tuple
        will be passed on to the constructor of `BoundingBox`.
        `palette` is an igraph palette that is used to transform
        numeric color IDs to RGB values. If `None`, a default grayscale
        palette is used from igraph.

        All the remaining positional and keyword arguments are passed
        on intact to `igraph.Graph.__plot__`.
        """
        from igraph import BoundingBox, palettes
        super().__init__()
        self.graph = graph
        self.palette = palette or palettes["gray"]
        # draw within the pixel bounds of the target matplotlib axis
        self.bbox = BoundingBox(axis.bbox.bounds)
        # stored and forwarded verbatim to igraph.Graph.__plot__ at draw time
        self.args = args
        self.kwds = kwds
    def draw(self, renderer):
        """Render the graph through igraph onto the Cairo context.

        Raises TypeError when the active backend is not Cairo-based, since
        igraph can only draw onto a Cairo surface.
        """
        from matplotlib.backends.backend_cairo import RendererCairo
        if not isinstance(renderer, RendererCairo):
            raise TypeError(
                "graph plotting is supported only on Cairo backends")
        # delegate the actual drawing to igraph, using the backend's context
        self.graph.__plot__(renderer.gc.ctx, self.bbox, self.palette,
                            *self.args, **self.kwds)
def _circular_layout(graph, node_size):
max_nsize = np.max(node_size)
# chose radius such that r*dtheta > max_nsize
dtheta = 2*np.pi / graph.node_nb()
r = 1.1*max_nsize / dtheta
thetas = np.array([i*dtheta for i in range(graph.node_nb())])
x = r*np.cos(thetas)
y = r*np.sin(thetas)
return np.array((x, y)).T
|
Silmathoron/NNGT
|
nngt/plot/plt_networks.py
|
Python
|
gpl-3.0
| 70,660
|
[
"NEURON"
] |
bb180d6a6cabd606480871f25390b4ed7e1dbb4ef4133256b2c5470211739c69
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.analysis.CenterOfMass
********************************
.. function:: espressopp.analysis.CenterOfMass(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_CenterOfMass
class CenterOfMassLocal(ObservableLocal, analysis_CenterOfMass):
    """Worker-side (local) wrapper of the C++ center-of-mass observable."""
    def __init__(self, system):
        # only construct the C++ object on ranks that belong to the active
        # PMI CPU group (or on every rank when no PMI communicator is active)
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_CenterOfMass, system)
# the controller rank exposes the PMI proxy class that forwards calls
# to the CenterOfMassLocal instances living on the workers
if pmi.isController :
    class CenterOfMass(Observable):
        """Controller-side PMI proxy for the center-of-mass observable."""
        # Python 2 metaclass hook: pmi.Proxy generates the forwarding methods
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.CenterOfMassLocal'
        )
|
govarguz/espressopp
|
src/analysis/CenterOfMass.py
|
Python
|
gpl-3.0
| 1,672
|
[
"ESPResSo"
] |
09b664b7e26f5b1d17624dbba585832248dc4cfe54b7045190c9e8761539d7c7
|
""" NOTA BENE: This agent should NOT be run alone. Instead, it serves as a base class for extensions.
The TaskManagerAgentBase is the base class to submit tasks to external systems,
monitor and update the tasks and file status in the transformation DB.
This agent is extended in WorkflowTaskAgent and RequestTaskAgent.
In case you want to further extend it you are required to follow the note on the
initialize method and on the _getClients method.
"""
__RCSID__ = "$Id$"
import time
import datetime
from Queue import Queue
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.Dictionaries import breakDictionaryIntoChunks
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getDNForUsername, getUsernameForDN
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.TransformationSystem.Client.FileReport import FileReport
from DIRAC.TransformationSystem.Client.TaskManager import WorkflowTasks
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.TransformationAgentsUtilities import TransformationAgentsUtilities
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
AGENT_NAME = 'Transformation/TaskManagerAgentBase'
class TaskManagerAgentBase(AgentModule, TransformationAgentsUtilities):
""" To be extended. Please look at WorkflowTaskAgent and RequestTaskAgent.
"""
  def __init__(self, *args, **kwargs):
    """ c'tor

        Always call this in the extension agent
    """
    AgentModule.__init__(self, *args, **kwargs)
    TransformationAgentsUtilities.__init__(self)
    # clients are created in initialize(); extensions may override them
    self.transClient = None
    self.jobManagerClient = None
    # transformation types handled by this agent (set by the extension)
    self.transType = []
    # maximum number of tasks submitted per transformation per cycle
    self.tasksPerLoop = 50
    self.maxParametricJobs = 20  # will be updated in execute()
    # credentials
    self.shifterProxy = None
    self.credentials = None
    # (owner, ownerGroup, ownerDN) resolved from self.credentials
    self.credTuple = (None, None, None)
    self.pluginLocation = ''
    self.bulkSubmissionFlag = False
    # for the threading
    self.transQueue = Queue()
    # transformation IDs currently queued / being processed per thread
    self.transInQueue = []
    self.transInThread = {}
#############################################################################
  def initialize(self):
    """ Agent initialization.

        The extensions MUST provide in the initialize method the following data members:
        - TransformationClient objects (self.transClient),
        - set the shifterProxy if different from the default one set here ('ProductionManager')
        - list of transformation types to be looked (self.transType)
    """
    # monitoring counter for the number of automatically submitted tasks
    gMonitor.registerActivity("SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks",
                              gMonitor.OP_ACUM)
    self.pluginLocation = self.am_getOption('PluginLocation', 'DIRAC.TransformationSystem.Client.TaskManagerPlugin')
    # Default clients
    self.transClient = TransformationClient()
    self.jobManagerClient = JobManagerClient()
    # Bulk submission flag
    self.bulkSubmissionFlag = self.am_getOption('BulkSubmission', self.bulkSubmissionFlag)
    # Shifter credentials to use, could replace the use of shifterProxy eventually
    self.shifterProxy = self.am_getOption('shifterProxy', self.shifterProxy)
    self.credentials = self.am_getOption('ShifterCredentials', self.credentials)
    resCred = self.__getCredentials()
    if not resCred['OK']:
      return resCred
    # setting up the threading: each worker thread runs self._execute, which
    # consumes transformations from self.transQueue
    maxNumberOfThreads = self.am_getOption('maxNumberOfThreads', 15)
    threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads)
    self.log.verbose("Multithreaded with %d threads" % maxNumberOfThreads)
    for i in xrange(maxNumberOfThreads):
      threadPool.generateJobAndQueueIt(self._execute, [i])
    return S_OK()
def finalize(self):
    """Gracefully shut down: stop feeding the workers and wait until the
    threads have finished the transformations they are processing."""
    if not self.transInQueue:
        return S_OK()
    self._logInfo("Wait for threads to get empty before terminating the agent (%d tasks)" %
                  len(self.transInThread))
    # Emptying the queue tells the worker threads to pick up nothing new;
    # then poll until every in-flight transformation has completed.
    self.transInQueue = []
    while self.transInThread:
        time.sleep(2)
    self.log.info("Threads are empty, terminating the agent...")
    return S_OK()
#############################################################################
def execute(self):
    """ The TaskManagerBase execution method is just filling the Queues of transformations that need to be processed

    Depending on the agent options (MonitorTasks, MonitorFiles, CheckReserved,
    SubmitTasks) it selects matching transformations and records, per
    transformation, the list of operations the worker threads must perform.
    """
    operationsOnTransformationDict = {}
    owner, ownerGroup, ownerDN = None, None, None

    # getting the credentials for submission
    resProxy = getProxyInfo(proxy=False, disableVOMS=False)
    if resProxy['OK']:  # there is a shifterProxy
        proxyInfo = resProxy['Value']
        owner = proxyInfo['username']
        ownerGroup = proxyInfo['group']
        ownerDN = proxyInfo['identity']
        self.log.info("ShifterProxy: Tasks will be submitted with the credentials %s:%s" % (owner, ownerGroup))
    elif self.credentials:
        # ShifterCredentials option was resolved in __getCredentials()
        owner, ownerGroup, ownerDN = self.credTuple
    else:
        # Neither proxy nor credentials: each transformation will use its own owner
        self.log.info("Using per Transformation Credentials!")

    # Determine whether the task status is to be monitored and updated
    enableTaskMonitor = self.am_getOption('MonitorTasks', '')
    if not enableTaskMonitor:
        self.log.verbose("Monitoring of tasks is disabled. To enable it, create the 'MonitorTasks' option")
    else:
        # Get the transformations for which the tasks have to be updated
        status = self.am_getOption('UpdateTasksTransformationStatus',
                                   self.am_getOption('UpdateTasksStatus', ['Active', 'Completing', 'Stopped']))
        # agentType=[] disables the AgentType filter for monitoring operations
        transformations = self._selectTransformations(transType=self.transType, status=status, agentType=[])
        if not transformations['OK']:
            self.log.warn("Could not select transformations:", transformations['Message'])
        else:
            self._addOperationForTransformations(operationsOnTransformationDict, 'updateTaskStatus', transformations,
                                                 owner=owner, ownerGroup=ownerGroup, ownerDN=ownerDN)

    # Determine whether the task files status is to be monitored and updated
    enableFileMonitor = self.am_getOption('MonitorFiles', '')
    if not enableFileMonitor:
        self.log.verbose("Monitoring of files is disabled. To enable it, create the 'MonitorFiles' option")
    else:
        # Get the transformations for which the files have to be updated
        status = self.am_getOption('UpdateFilesTransformationStatus',
                                   self.am_getOption('UpdateFilesStatus', ['Active', 'Completing', 'Stopped']))
        transformations = self._selectTransformations(transType=self.transType, status=status, agentType=[])
        if not transformations['OK']:
            self.log.warn("Could not select transformations:", transformations['Message'])
        else:
            self._addOperationForTransformations(operationsOnTransformationDict, 'updateFileStatus', transformations,
                                                 owner=owner, ownerGroup=ownerGroup, ownerDN=ownerDN)

    # Determine whether the checking of reserved tasks is to be performed
    enableCheckReserved = self.am_getOption('CheckReserved', '')
    if not enableCheckReserved:
        self.log.verbose("Checking of reserved tasks is disabled. To enable it, create the 'CheckReserved' option")
    else:
        # Get the transformations for which the check of reserved tasks have to be performed
        status = self.am_getOption('CheckReservedTransformationStatus',
                                   self.am_getOption('CheckReservedStatus', ['Active', 'Completing', 'Stopped']))
        transformations = self._selectTransformations(transType=self.transType, status=status, agentType=[])
        if not transformations['OK']:
            self.log.warn("Could not select transformations:", transformations['Message'])
        else:
            self._addOperationForTransformations(operationsOnTransformationDict, 'checkReservedTasks', transformations,
                                                 owner=owner, ownerGroup=ownerGroup, ownerDN=ownerDN)

    # Determine whether the submission of tasks is to be performed
    enableSubmission = self.am_getOption('SubmitTasks', 'yes')
    if not enableSubmission:
        self.log.verbose("Submission of tasks is disabled. To enable it, create the 'SubmitTasks' option")
    else:
        # Get the transformations for which the check of reserved tasks have to be performed
        status = self.am_getOption('SubmitTransformationStatus',
                                   self.am_getOption('SubmitStatus', ['Active', 'Completing']))
        # NOTE: unlike the monitoring selections above, submission keeps the
        # default agentType filter (Automatic)
        transformations = self._selectTransformations(transType=self.transType, status=status)
        if not transformations['OK']:
            self.log.warn("Could not select transformations:", transformations['Message'])
        else:
            # Get the transformations which should be submitted
            self.tasksPerLoop = self.am_getOption('TasksPerLoop', self.tasksPerLoop)
            res = self.jobManagerClient.getMaxParametricJobs()
            if not res['OK']:
                # Non-fatal: keep the previously known value of maxParametricJobs
                self.log.warn("Could not get the maxParametricJobs from JobManager", res['Message'])
            else:
                self.maxParametricJobs = res['Value']
            self._addOperationForTransformations(operationsOnTransformationDict, 'submitTasks', transformations,
                                                 owner=owner, ownerGroup=ownerGroup, ownerDN=ownerDN)

    # Hand everything over to the worker threads
    self._fillTheQueue(operationsOnTransformationDict)

    return S_OK()
def _selectTransformations(self, transType=None, status=None, agentType=None):
    """Fetch the transformations matching the given filters.

    Defaults to status ['Active', 'Completing'] and agentType ['Automatic']
    when the caller passes None; an explicitly empty list disables the
    corresponding filter.
    """
    if status is None:
        status = ['Active', 'Completing']
    if agentType is None:
        agentType = ['Automatic']
    # Assemble the selection dictionary, skipping empty filter values
    selectCond = {}
    for condName, condValue in (('Status', status), ('AgentType', agentType)):
        if condValue:
            selectCond[condName] = condValue
    if transType is not None:
        selectCond['Type'] = transType
    res = self.transClient.getTransformations(condDict=selectCond)
    if not res['OK']:
        self.log.error("Failed to get transformations:", res['Message'])
    elif not res['Value']:
        self.log.verbose("No transformations found")
    else:
        self.log.verbose("Obtained %d transformations" % len(res['Value']))
    return res
def _fillTheQueue(self, operationsOnTransformationsDict):
    """Push each transformation (with its pending operations) onto the
    worker queue, skipping those already queued."""
    queuedCount = 0
    for transID, bodyAndOps in operationsOnTransformationsDict.iteritems():
        # A transformation still being processed is not re-queued
        if transID in self.transInQueue:
            continue
        queuedCount += 1
        self.transInQueue.append(transID)
        self.transQueue.put({transID: bodyAndOps})

    self.log.info("Out of %d transformations, %d put in thread queue" % (len(operationsOnTransformationsDict),
                                                                         queuedCount))
#############################################################################
def _getClients(self, ownerDN=None, ownerGroup=None):
    """Returns the clients used in the threads

    This is another function that should be extended.
    The clients provided here are defaults, and should be adapted

    If ownerDN and ownerGroup are not None the clients will delegate to these credentials

    :param str ownerDN: DN of the owner of the submitted jobs
    :param str ownerGroup: group of the owner of the submitted jobs
    :returns: dict of Clients
    """
    # Fresh client instances per call so that each worker thread owns its clients
    threadTransformationClient = TransformationClient()
    # WorkflowTasks submits on behalf of the given credentials (if any)
    threadTaskManager = WorkflowTasks(ownerDN=ownerDN, ownerGroup=ownerGroup)
    threadTaskManager.pluginLocation = self.pluginLocation

    return {'TransformationClient': threadTransformationClient,
            'TaskManager': threadTaskManager}
def _execute(self, threadID):
    """ This is what runs inside the threads, in practice this is the function that does the real stuff

    Endless loop: pop one {transID: {'Operations': [...], ...}} dict from
    self.transQueue and run each listed operation (a method name on self)
    with the appropriate clients.

    :param int threadID: index of this worker thread (used only for logging)
    """
    # Each thread will have its own clients if we use credentials/shifterProxy
    clients = self._getClients() if self.shifterProxy else \
        self._getClients(ownerGroup=self.credTuple[1], ownerDN=self.credTuple[2]) if self.credentials \
        else None
    method = '_execute'
    operation = 'None'

    while True:
        startTime = time.time()
        # Blocks until some transformation is queued by execute()
        transIDOPBody = self.transQueue.get()
        if not self.transInQueue:
            # Queue was cleared, nothing to do
            continue
        try:
            # Python 2: dict.keys() is a list; the dict has a single entry
            transID = transIDOPBody.keys()[0]
            operations = transIDOPBody[transID]['Operations']
            if transID not in self.transInQueue:
                # NOTE(review): 'break' terminates this worker thread for good,
                # not just this transformation — presumably intentional as an
                # inconsistency guard; confirm.
                self._logWarn("Got a transf not in transInQueue...?",
                              method=method, transID=transID)
                break
            if not (self.credentials or self.shifterProxy):
                # Per-transformation credentials: build clients delegated to the
                # transformation owner recorded by _addOperationForTransformations
                ownerDN, group = transIDOPBody[transID]['OwnerDN'], transIDOPBody[transID]['OwnerGroup']
                clients = self._getClients(ownerDN=ownerDN, ownerGroup=group)
            self.transInThread[transID] = ' [Thread%d] [%s] ' % (threadID, str(transID))
            self._logInfo("Start processing transformation", method=method, transID=transID)
            clients['TaskManager'].transInThread = self.transInThread
            for operation in operations:
                self._logInfo("Executing %s" % operation, method=method, transID=transID)
                startOperation = time.time()
                # Dispatch by name: e.g. 'updateTaskStatus' -> self.updateTaskStatus
                res = getattr(self, operation)(transIDOPBody, clients)
                if not res['OK']:
                    self._logError("Failed to %s: %s" % (operation, res['Message']), method=method, transID=transID)
                self._logInfo("Executed %s in %.1f seconds" % (operation, time.time() - startOperation),
                              method=method, transID=transID)
        except Exception as x:  # pylint: disable=broad-except
            self._logException('Exception executing operation %s' % operation, lException=x,
                               method=method, transID=transID)
        finally:
            # NOTE(review): transID may be unbound here if keys()[0] raised
            # before assignment — confirm whether that path can occur.
            if not transID:
                transID = 'None'
            self._logInfo("Processed transformation in %.1f seconds" % (time.time() - startTime),
                          method=method, transID=transID)
            # Book-keeping: mark the transformation as no longer being processed
            self.transInThread.pop(transID, None)
            self._logVerbose("%d transformations still in queue" % (len(self.transInThread)),
                             method=method, transID=transID)
            if transID in self.transInQueue:
                self.transInQueue.remove(transID)
            self._logDebug("transInQueue = ", self.transInQueue,
                           method=method, transID=transID)
#############################################################################
# real operations done
def updateTaskStatus(self, transIDOPBody, clients):
    """ Updates the task status

    Fetches the tasks of the transformation that are in a 'to be updated'
    state and not updated within the last 10 minutes, queries their current
    external status via the TaskManager, and writes changed statuses back.

    :param dict transIDOPBody: {transID: {'Operations': ..., ...}}
    :param dict clients: dict with 'TransformationClient' and 'TaskManager'
    :return: S_OK/S_ERROR
    """
    transID = transIDOPBody.keys()[0]  # Python 2: keys() is a list
    method = 'updateTaskStatus'

    # Get the tasks which are in an UPDATE state
    updateStatus = self.am_getOption('TaskUpdateStatus', ['Checking', 'Deleted', 'Killed', 'Staging', 'Stalled',
                                                         'Matched', 'Scheduled', 'Rescheduled', 'Completed',
                                                         'Submitted', 'Assigned', 'Received',
                                                         'Waiting', 'Running'])
    condDict = {"TransformationID": transID, "ExternalStatus": updateStatus}
    # Only consider tasks not touched in the last 10 minutes
    timeStamp = str(datetime.datetime.utcnow() - datetime.timedelta(minutes=10))

    # Get transformation tasks
    transformationTasks = clients['TransformationClient'].getTransformationTasks(condDict=condDict,
                                                                                 older=timeStamp,
                                                                                 timeStamp='LastUpdateTime')
    if not transformationTasks['OK']:
        self._logError("Failed to get tasks to update:", transformationTasks['Message'],
                       method=method, transID=transID)
        return transformationTasks
    if not transformationTasks['Value']:
        self._logVerbose("No tasks found to update",
                         method=method, transID=transID)
        return transformationTasks

    # Get status for the transformation tasks
    # chunkSize == 0 means: process all tasks in one go
    chunkSize = self.am_getOption('TaskUpdateChunkSize', 0)
    try:
        chunkSize = int(chunkSize)
    except ValueError:
        chunkSize = 0
    if chunkSize:
        self._logVerbose("Getting %d tasks status (chunks of %d)" %
                         (len(transformationTasks['Value']), chunkSize),
                         method=method, transID=transID)
    else:
        self._logVerbose("Getting %d tasks status" %
                         len(transformationTasks['Value']),
                         method=method, transID=transID)
    updated = {}
    for nb, taskChunk in enumerate(breakListIntoChunks(transformationTasks['Value'], chunkSize)
                                   if chunkSize else
                                   [transformationTasks['Value']]):
        submittedTaskStatus = clients['TaskManager'].getSubmittedTaskStatus(taskChunk)
        if not submittedTaskStatus['OK']:
            self._logError("Failed to get updated task states:", submittedTaskStatus['Message'],
                           method=method, transID=transID)
            return submittedTaskStatus
        statusDict = submittedTaskStatus['Value']
        if not statusDict:
            # No 'continue' needed: iterating an empty dict below is a no-op
            self._logVerbose("%4d: No tasks to update" % nb,
                             method=method, transID=transID)

        # Set status for tasks that changes
        for status, taskIDs in statusDict.iteritems():
            self._logVerbose("%4d: Updating %d task(s) to %s" % (nb, len(taskIDs), status),
                             method=method, transID=transID)
            setTaskStatus = clients['TransformationClient'].setTaskStatus(transID, taskIDs, status)
            if not setTaskStatus['OK']:
                self._logError("Failed to update task status for transformation:", setTaskStatus['Message'],
                               method=method, transID=transID)
                return setTaskStatus
            # Per-status counter for the summary log below
            updated[status] = updated.setdefault(status, 0) + len(taskIDs)

    for status, nb in updated.iteritems():
        self._logInfo("Updated %d tasks to status %s" % (nb, status),
                      method=method, transID=transID)
    return S_OK()
def updateFileStatus(self, transIDOPBody, clients):
    """ Update the files status

    Looks at the 'Assigned' files of the transformation that were not
    updated in the last 10 minutes, asks the TaskManager for their current
    status, and commits the changed statuses through a FileReport.

    :param dict transIDOPBody: {transID: {'Operations': ..., ...}}
    :param dict clients: dict with 'TransformationClient' and 'TaskManager'
    :return: S_OK/S_ERROR
    """
    transID = transIDOPBody.keys()[0]  # Python 2: keys() is a list
    method = 'updateFileStatus'

    # Only consider files not touched in the last 10 minutes
    timeStamp = str(datetime.datetime.utcnow() - datetime.timedelta(minutes=10))

    # get transformation files
    condDict = {'TransformationID': transID, 'Status': ['Assigned']}
    transformationFiles = clients['TransformationClient'].getTransformationFiles(condDict=condDict,
                                                                                 older=timeStamp,
                                                                                 timeStamp='LastUpdate')
    if not transformationFiles['OK']:
        self._logError("Failed to get transformation files to update:", transformationFiles['Message'],
                       method=method, transID=transID)
        return transformationFiles
    if not transformationFiles['Value']:
        self._logInfo("No files to be updated",
                      method=method, transID=transID)
        return transformationFiles

    # Get the status of the transformation files
    # Sort the files by taskID
    taskFiles = {}
    for fileDict in transformationFiles['Value']:
        taskFiles.setdefault(fileDict['TaskID'], []).append(fileDict)

    chunkSize = 100
    self._logVerbose("Getting file status for %d tasks (chunks of %d)" %
                     (len(taskFiles), chunkSize),
                     method=method, transID=transID)
    updated = {}
    # Process 100 tasks at a time
    for nb, taskIDs in enumerate(breakListIntoChunks(taskFiles, chunkSize)):
        # Flatten the per-task file lists of this chunk
        fileChunk = []
        for taskID in taskIDs:
            fileChunk += taskFiles[taskID]
        submittedFileStatus = clients['TaskManager'].getSubmittedFileStatus(fileChunk)
        if not submittedFileStatus['OK']:
            self._logError("Failed to get updated file states for transformation:", submittedFileStatus['Message'],
                           method=method, transID=transID)
            return submittedFileStatus
        statusDict = submittedFileStatus['Value']
        if not statusDict:
            self._logVerbose("%4d: No file states to be updated" % nb,
                             method=method, transID=transID)
            continue

        # Set the status of files: accumulate per-LFN updates, commit once per chunk
        fileReport = FileReport(server=clients['TransformationClient'].getServer())
        for lfn, status in statusDict.iteritems():
            updated[status] = updated.setdefault(status, 0) + 1
            setFileStatus = fileReport.setFileStatus(transID, lfn, status)
            if not setFileStatus['OK']:
                return setFileStatus
        commit = fileReport.commit()
        if not commit['OK']:
            self._logError("Failed to update file states for transformation:", commit['Message'],
                           method=method, transID=transID)
            return commit
        else:
            self._logVerbose("%4d: Updated the states of %d files" % (nb, len(commit['Value'])),
                             method=method, transID=transID)

    for status, nb in updated.iteritems():
        self._logInfo("Updated %d files to status %s" % (nb, status),
                      method=method, transID=transID)
    return S_OK()
def checkReservedTasks(self, transIDOPBody, clients):
    """ Checking Reserved tasks

    Tasks stuck in 'Reserved' for over an hour are either reset to
    'Created' (no external job/request found) or promoted to 'Submitted'
    with the recovered external ID.

    :param dict transIDOPBody: {transID: {'Operations': ..., ...}}
    :param dict clients: dict with 'TransformationClient' and 'TaskManager'
    :return: S_OK/S_ERROR
    """
    transID = transIDOPBody.keys()[0]  # Python 2: keys() is a list
    method = 'checkReservedTasks'

    # Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
    condDict = {"TransformationID": transID, "ExternalStatus": 'Reserved'}
    time_stamp_older = str(datetime.datetime.utcnow() - datetime.timedelta(hours=1))

    res = clients['TransformationClient'].getTransformationTasks(condDict=condDict, older=time_stamp_older)
    self._logDebug("getTransformationTasks(%s) return value:" % condDict, res,
                   method=method, transID=transID)
    if not res['OK']:
        self._logError("Failed to get Reserved tasks:", res['Message'],
                       method=method, transID=transID)
        return res
    if not res['Value']:
        self._logVerbose("No Reserved tasks found", transID=transID)
        return res
    reservedTasks = res['Value']

    # Update the reserved tasks
    res = clients['TaskManager'].updateTransformationReservedTasks(reservedTasks)
    self._logDebug("updateTransformationReservedTasks(%s) return value:" % reservedTasks, res,
                   method=method, transID=transID)
    if not res['OK']:
        self._logError("Failed to update transformation reserved tasks:", res['Message'],
                       method=method, transID=transID)
        return res
    noTasks = res['Value']['NoTasks']
    taskNameIDs = res['Value']['TaskNameIDs']

    # For the tasks with no associated request found re-set the status of the task in the transformationDB
    if noTasks:
        self._logInfo("Resetting status of %d tasks to Created as no associated job/request found" % len(noTasks),
                      method=method, transID=transID)
        for taskName in noTasks:
            # NOTE: transID is rebound here from the task name (same value expected)
            transID, taskID = self._parseTaskName(taskName)
            res = clients['TransformationClient'].setTaskStatus(transID, taskID, 'Created')
            if not res['OK']:
                self._logError("Failed to update task status and ID after recovery:",
                               '%s %s' % (taskName, res['Message']),
                               method=method, transID=transID)
                return res

    # For the tasks for which an associated request was found update the task details in the transformationDB
    for taskName, extTaskID in taskNameIDs.items():
        transID, taskID = self._parseTaskName(taskName)
        self._logInfo("Setting status of %s to Submitted with ID %s" % (taskName, extTaskID),
                      method=method, transID=transID)
        setTaskStatusAndWmsID = clients['TransformationClient'].setTaskStatusAndWmsID(transID, taskID,
                                                                                      'Submitted', str(extTaskID))
        if not setTaskStatusAndWmsID['OK']:
            self._logError("Failed to update task status and ID after recovery:",
                           "%s %s" % (taskName, setTaskStatusAndWmsID['Message']),
                           method=method, transID=transID)
            return setTaskStatusAndWmsID
    return S_OK()
def submitTasks(self, transIDOPBody, clients):
    """ Submit the tasks to an external system, using the taskManager provided

    :param dict transIDOPBody: transformation body
    :param dict clients: dictionary of client objects
    :return: S_OK/S_ERROR
    """
    transID = transIDOPBody.keys()[0]  # Python 2: keys() is a list
    transBody = transIDOPBody[transID]['Body']
    # Credentials recorded per transformation by _addOperationForTransformations
    owner = transIDOPBody[transID]['Owner']
    ownerGroup = transIDOPBody[transID]['OwnerGroup']
    ownerDN = transIDOPBody[transID]['OwnerDN']
    method = 'submitTasks'

    # Get all tasks to submit
    tasksToSubmit = clients['TransformationClient'].getTasksToSubmit(transID, self.tasksPerLoop)
    self._logDebug("getTasksToSubmit(%s, %s) return value:" % (transID, self.tasksPerLoop), tasksToSubmit,
                   method=method, transID=transID)
    if not tasksToSubmit['OK']:
        self._logError("Failed to obtain tasks:", tasksToSubmit['Message'],
                       method=method, transID=transID)
        return tasksToSubmit
    tasks = tasksToSubmit['Value']['JobDictionary']
    if not tasks:
        self._logVerbose("No tasks found for submission",
                         method=method, transID=transID)
        return tasksToSubmit
    self._logInfo("Obtained %d tasks for submission" % len(tasks),
                  method=method, transID=transID)

    # Prepare tasks and submits them, by chunks
    # Chunk size depends on whether bulk (parametric) submission is enabled
    chunkSize = self.maxParametricJobs if self.bulkSubmissionFlag else self.tasksPerLoop
    for taskDictChunk in breakDictionaryIntoChunks(tasks, chunkSize):
        res = self._prepareAndSubmitAndUpdateTasks(transID, transBody, taskDictChunk,
                                                   owner, ownerDN, ownerGroup,
                                                   clients)
        if not res['OK']:
            return res
        self._logVerbose("Submitted %d jobs, bulkSubmissionFlag = %s" % (len(taskDictChunk), self.bulkSubmissionFlag))

    return S_OK()
def _prepareAndSubmitAndUpdateTasks(self, transID, transBody, tasks, owner, ownerDN, ownerGroup, clients):
    """ prepare + submit + monitor a dictionary of tasks

    :param int transID: transformation ID
    :param str transBody: transformation job template
    :param dict tasks: dictionary of per task parameters
    :param str owner: owner of the transformation
    :param str ownerDN: DN of the owner of the transformation
    :param str ownerGroup: group of the owner of the transformation
    :param dict clients: dictionary of client objects
    :return: S_OK/S_ERROR
    """
    method = '_prepareAndSubmitAndUpdateTasks'
    # prepare tasks
    preparedTransformationTasks = clients['TaskManager'].prepareTransformationTasks(transBody,
                                                                                    tasks,
                                                                                    owner,
                                                                                    ownerGroup,
                                                                                    ownerDN,
                                                                                    self.bulkSubmissionFlag)
    self._logDebug("prepareTransformationTasks return value:", preparedTransformationTasks,
                   method=method, transID=transID)
    if not preparedTransformationTasks['OK']:
        self._logError("Failed to prepare tasks", preparedTransformationTasks['Message'],
                       method=method, transID=transID)
        return preparedTransformationTasks

    # Submit tasks
    res = clients['TaskManager'].submitTransformationTasks(preparedTransformationTasks['Value'])
    self._logDebug("submitTransformationTasks return value:", res,
                   method=method, transID=transID)
    if not res['OK']:
        self._logError("Failed to submit prepared tasks:", res['Message'],
                       method=method, transID=transID)
        return res

    # Update tasks after submission
    res = clients['TaskManager'].updateDBAfterTaskSubmission(res['Value'])
    self._logDebug("updateDBAfterTaskSubmission return value:", res,
                   method=method, transID=transID)
    if not res['OK']:
        self._logError("Failed to update DB after task submission:", res['Message'],
                       method=method, transID=transID)
        return res

    return S_OK()
@staticmethod
def _addOperationForTransformations(operationsOnTransformationDict, operation, transformations,
                                    owner=None, ownerGroup=None, ownerDN=None):
    """Fill the operationsOnTransformationDict

    For each selected transformation, append `operation` to its list of
    pending operations (creating the entry, with body and credentials, on
    first sight).

    :param dict operationsOnTransformationDict: accumulator, mutated in place
    :param str operation: method name the worker thread should call
    :param dict transformations: S_OK structure with the selected transformations
    :param owner/ownerGroup/ownerDN: global credentials; when `owner` is unset
        the per-transformation author credentials are used instead
    """
    transformationIDsAndBodies = [(transformation['TransformationID'],
                                   transformation['Body'],
                                   transformation['AuthorDN'],
                                   transformation['AuthorGroup']) for transformation in transformations['Value']]
    for transID, body, t_ownerDN, t_ownerGroup in transformationIDsAndBodies:
        if transID in operationsOnTransformationDict:
            operationsOnTransformationDict[transID]['Operations'].append(operation)
        else:
            # NOTE: all three fallbacks are deliberately guarded by `owner`
            # (not by their own variable) — the credentials triple is set
            # together or not at all; presumably intentional, confirm.
            operationsOnTransformationDict[transID] = {'Body': body, 'Operations': [operation],
                                                       'Owner': owner if owner else getUsernameForDN(t_ownerDN)['Value'],
                                                       'OwnerGroup': ownerGroup if owner else t_ownerGroup,
                                                       'OwnerDN': ownerDN if owner else t_ownerDN}
def __getCredentials(self):
    """Get the credentials to use if ShifterCredentials are set, otherwise do nothing.

    This function fills the self.credTuple tuple.

    :return: S_OK/S_ERROR
    """
    if not self.credentials:
        return S_OK()
    resCred = Operations().getOptionsDict("/Shifter/%s" % self.credentials)
    if not resCred['OK']:
        self.log.error("Cred: Failed to find shifter credentials", self.credentials)
        return resCred
    owner = resCred['Value']['User']
    ownerGroup = resCred['Value']['Group']
    # returns a list
    # NOTE(review): assumes getDNForUsername succeeds and returns at least
    # one DN — a failure here would raise, not return S_ERROR; confirm.
    ownerDN = getDNForUsername(owner)['Value'][0]
    self.credTuple = (owner, ownerGroup, ownerDN)
    self.log.info("Cred: Tasks will be submitted with the credentials %s:%s" % (owner, ownerGroup))
    return S_OK()
|
andresailer/DIRAC
|
TransformationSystem/Agent/TaskManagerAgentBase.py
|
Python
|
gpl-3.0
| 31,028
|
[
"DIRAC"
] |
ba0207880bba1ec42b66e8bba391d78da030890b257322e427a35b7d44447e23
|
import os
import math
import subprocess
from omg import atoms
from omg import iolines
class FlagInfo():
    """Byte/line location and text layout of one %FLAG section of a prmtop file."""

    def __init__(self, ptr_byte, ptr_line, read_n_lines, textformat):
        self.byte = ptr_byte
        self.line = ptr_line  # Unused
        self.read_n_lines = read_n_lines
        parsed = self._interpret_format(textformat)
        self.data_per_line = parsed[0]
        self.chars_per_data = parsed[1]
        self.datatype = parsed[2]

    def _interpret_format(self, frmt):
        """ Takes something like 10I8 and gives 10 + I + 8"""
        nondigits = [char for char in frmt if not char.isdigit()]
        # First is data type, second (if exists) is a dot. Example: 5E16.8
        datatype = nondigits[0]
        # Split once around the datatype letter: count on the left, width on the right
        count_str, _, width_str = frmt.partition(datatype)
        Nlinedata = int(count_str)
        if len(nondigits) == 1:
            datasize = int(width_str)
        elif len(nondigits) == 2:
            datasize = int(width_str.split('.')[0])
        else:
            raise RuntimeError('Format string --- %s ---does \
not look like these:\n \
20a4 or 5E16.8' % (frmt))
        return Nlinedata, datasize, datatype  # Ndata * sizedata = 80
class Prmtop():
    """Random-access reader for an AMBER prmtop file.

    Indexes every %FLAG section once at construction time (via grep), then
    reads and decodes individual sections on demand with read_flag().
    """

    def __init__(self, name, file_limit_MB=150):
        # name: path to the prmtop file on disk
        # file_limit_MB: refuse to index files larger than this
        self.name = name
        self._flags = self._get_all_flags(file_limit_MB)
        ###self.all_data_dict = self._read_all()
def _get_all_flags(self, file_limit_MB):
    """Index all %FLAG sections of the file.

    Uses external UNIX tools (grep, wc) to find the byte/line offsets of
    each %FLAG header and the corresponding %FORMAT string, and returns a
    dict {flag name: FlagInfo}.

    NOTE(review): grep/%FORMAT pairing relies on both grep calls returning
    entries in the same order; also Popen.communicate() returns bytes under
    Python 3, so this code presumably targets Python 2 — confirm.
    """
    file_size_MB = float(os.stat(self.name).st_size) / (1024**2)
    if file_size_MB > file_limit_MB:
        raise RuntimeError(
            'File size is %f MB. Max = %f MB' % (
                file_size_MB, file_limit_MB))
    flags_dict = {}
    ### Use UNIX grep to get line/byte indexes of %FLAGs
    grep_output = subprocess.Popen('grep -nb "%%FLAG" %s' % (self.name),
                                   shell=True,
                                   stdout=subprocess.PIPE)
    # Drop the trailing empty string produced by the final newline
    flag_list = grep_output.communicate()[0].split('\n')[:-1]
    #flag_list = getoutput('grep -nb "%%FLAG" %s' % (self.name)).split('\n')
    ### Get formats for each flag assuming same order :-)
    grep_output = subprocess.Popen('grep -n "%%FORMAT" %s' % (self.name),
                                   shell=True,
                                   stdout=subprocess.PIPE)
    format_list = grep_output.communicate()[0].split()
    # Keep only the text between the parentheses, e.g. '%FORMAT(10I8)' -> '10I8'
    format_list = [frmt.split('(')[1].split(')')[0] for frmt in format_list]
    # Generate lines list
    flag_lines = [int(flag.split(':')[0]) for flag in flag_list]
    ### Artifitially append number of lines plus one to flag_lines
    ### will be usefull when knowing the number of data lines in the
    ### last flag.
    grep_output = subprocess.Popen('wc -l %s' % (self.name), shell=True,
                                   stdout=subprocess.PIPE)
    total_lines = int(grep_output.communicate()[0].split()[0])
    flag_lines.append(total_lines + 1)
    i = 0
    for flag in flag_list:
        # grep -nb output is 'line:byte:%FLAG NAME'
        name = flag.split(' ')[1]
        pointer_byte = int(flag.split(':')[1])
        pointer_line = flag_lines[i]
        textformat = format_list[i]
        # data between flags excluding %FLAG and %FORMAT
        read_n_lines = -2 + flag_lines[i+1] - flag_lines[i]
        i += 1
        flags_dict[name] = FlagInfo(
            pointer_byte, pointer_line, read_n_lines, textformat)
    return flags_dict
def read_flag(self, flag_str):
# Seek appropriate flag object
flagObj = self._flags[flag_str]
f = open(self.name)
f.seek(flagObj.byte)
# Skip two lines (%FLAG and %FORMAT)
for i in range(2):
line = f.readline()
data_list = []
for i in range(flagObj.read_n_lines): #LOOP over lines
line = f.readline()
for j in range(int(len(line.strip('\n')) / flagObj.chars_per_data)):
data_as_str = line[j*flagObj.chars_per_data :
(j+1)*flagObj.chars_per_data]
data_list.append(data_as_str)
# Recognized data types:
# I - int | E - float | a - string
data = []
if flagObj.datatype == 'I':
data = [int(rawdata) for rawdata in data_list]
elif flagObj.datatype == 'E':
data = [float(rawdata) for rawdata in data_list]
elif flagObj.datatype == 'a':
data = [rawdata.replace(' ','') for rawdata in data_list]
f.close()
return data
def _read_all(self):
all_data = {}
for flag in self._flags:
print (flag)
all_data[flag] = self.read_flag(self._flags[flag])
return all_data
def vmdsel(self, selexpr):
    """VMD-like atom selection.

    Supports 'and'-joined clauses of the form '[not] name|resid|resname
    ITEM [ITEM ...]' (with 'A to B' ranges for resid). An empty expression
    selects every atom.

    Side effects: fills self.pdb_* (all atoms) and self.vmdsel_* (selected
    atoms) lists and self.n_atoms.

    :param str selexpr: selection expression
    :returns: list of 0-based indexes of the selected atoms
    """
    atom_name = self.read_flag('ATOM_NAME')
    # First POINTERS entry is the atom count (AMBER prmtop convention —
    # grounded by its use as the length of all per-atom lists below)
    self.n_atoms = self.read_flag('POINTERS')[0]
    resname_raw = self.read_flag('RESIDUE_LABEL')
    resi_idx = self.read_flag('RESIDUE_POINTER')
    resid = self._pointers2list(resi_idx, self.n_atoms)  # returns strings
    # Map each atom to its residue label via its (1-based) residue number
    resname = [resname_raw[int(resid[i])-1] for i in range(self.n_atoms)]
    master_approved = [True for i in range(self.n_atoms)]
    # Make some self available
    self.pdb_atom_name = atom_name
    self.pdb_resname = resname
    self.pdb_resid = resid
    # empty selection == all atoms
    if selexpr == '':
        return [i for i in range(self.n_atoms)]
    vmdsel_list = selexpr.split('and')
    for expr in vmdsel_list:
        neg, sele_by, selection_items = self._parse_unit(expr)
        if sele_by == 'name': atom_list = atom_name
        if sele_by == 'resid': atom_list = resid
        if sele_by == 'resname': atom_list = resname
        approved = self._unitsel(neg, selection_items, atom_list)
        # AND-combine: an atom stays selected only if every clause approves it
        for i in range(self.n_atoms):
            if not approved[i]:
                master_approved[i] = False
    # Create list of approved atom indexes
    # Create self lists of pdbInfo for selected atoms
    self.vmdsel_name = []
    self.vmdsel_resname = []
    self.vmdsel_resid = []
    approved_indexes = []
    for i in range(self.n_atoms):
        if master_approved[i]:
            approved_indexes.append(i)
            self.vmdsel_name .append(atom_name[i])
            self.vmdsel_resname.append(resname [i])
            self.vmdsel_resid .append(resid [i])
    return approved_indexes
def _pointers2list(self, pointrs, N):
out_list = [0 for i in range(N)]
pointrs.append('fake pointer')
pointr_idx = 0
for i in range(N):
# index starting at 1, as specified in pointrs
if pointrs[pointr_idx] == i+1:
pointr_idx += 1
out_list[i] = str(pointr_idx)
return out_list
def _parse_unit(self, unit):
"""called in vmdselection"""
unit_fields = unit.split()
u = 0
# Negation?
if unit_fields[0] == 'not':
negative = True
u += 1
else: negative = False
# Selecting by what?
sele_by = unit_fields[u]
u += 1
# List of what to select
sele_items = unit_fields[u:]
# Selection range? (resid only)
if (sele_by == 'resid') and 'to' in sele_items:
n_TOs = sele_items.count('to')
for i in range(n_TOs):
idx = sele_items.index('to')
start = int(sele_items[idx-1])
stop = int(sele_items[idx+1])
sele_items.pop(idx)
requested_range = []
int_range = [ i for i in range(start+1,stop)]
int_range.reverse()
for i in int_range:
sele_items.insert(idx,str(i))
#print 'Negation: ' + str(negative)
#print 'Select_by: ' + sele_by
#print 'Items: ' + str(sele_items)
return negative, sele_by, sele_items
def _unitsel(self, negation, selection_list, atom_list):
n_atoms = len(atom_list)
approvals = [False for i in range(n_atoms)]
i = 0
for atom in atom_list:
if atom in selection_list:
approvals[i] = True
i += 1
if negation:
for i in range(n_atoms):
if approvals[i]:
approvals[i] = False
elif not approvals[i]:
approvals[i] = True
return approvals
def _is_new_dihedral(self, dihedrals_list, new_dihe):
isnew = True
# Check if dihedral with same atoms already exists
for existing_dihe in dihedrals_list:
if new_dihe.has_same_atoms(existing_dihe):
isnew = False
# Check if same atoms lead to same values
if not new_dihe.has_same_values(existing_dihe):
print ('WARNING: INCONSISTENT parameters:')
print (new_dihe.print_gaussian_way())
print (existing_dihe.print_gaussian_way())
return isnew
def _retrieve_parm_dihedral(self):
    """Decode the prmtop dihedral tables into unique dihedral and improper
    parameter objects.

    Dihedral entries come 5 ints at a time (4 atom pointers + parameter
    index). A negative 4th pointer marks an improper; a negative 3rd
    pointer marks a multi-term dihedral whose following entry (hence the
    backward iteration) belongs to the same dihedral ("buffed" state).

    :returns: (list of parm_dihedral, list of parm_improper)
    """
    force_list = self.read_flag('DIHEDRAL_FORCE_CONSTANT')
    perio_list = self.read_flag('DIHEDRAL_PERIODICITY')
    phase_list = self.read_flag('DIHEDRAL_PHASE')
    inc_h = self.read_flag('DIHEDRALS_INC_HYDROGEN')
    not_h = self.read_flag('DIHEDRALS_WITHOUT_HYDROGEN')
    diheds = inc_h + not_h
    n_dihed = int(len(diheds)/5)
    print ('Decrypting %d dihedrals/impropers...' % (n_dihed))
    dihe_out = []
    impropers_out = []
    buffed = False
    last_idxs = ( 0,0,0,0)
    for i in range(n_dihed-1,-1,-1):  # going backwards
        # Set up atoms
        # Atom pointers are coordinate-array offsets: divide by 3 to get
        # the atom index (sign of idx3/idx4 carries flags, see above)
        idx1 = diheds[i*5+0]/3
        idx2 = diheds[i*5+1]/3
        idx3 = diheds[i*5+2]/3
        idx4 = diheds[i*5+3]/3
        mm1 = self.upper_atom_types[int(idx1)]
        mm2 = self.upper_atom_types[int(idx2)]
        mm3 = self.upper_atom_types[int(abs(idx3))]
        mm4 = self.upper_atom_types[int(abs(idx4))]
        current_idxs = (idx1, idx2, abs(idx3), abs(idx4))
        # Read values for this dihedral term
        idx = diheds[i*5+4]-1  # -1 for zero indexing
        pk = force_list[idx]
        pn = perio_list[idx]
        phase = phase_list[idx]
        # First, check if is dihedral or improper
        # If dihedral, check if buffed.
        # Not buffed: append it or buff it.
        # Buffed: add + append, or add!
        if idx4 > 0:  # is dihedral, not improper
            if not buffed:
                # no dihedral term waiting for another term
                # First create the dihedral term.
                this_dihe = parm_dihedral(mm1,mm2,mm3,mm4, pk,phase,pn)
                if idx3 > 0:
                    # single term. Just append.
                    if self._is_new_dihedral(dihe_out, this_dihe):
                        dihe_out.append(this_dihe)
                elif idx3 < 0:
                    # multi term (if next entry has same atoms)
                    buffed = True
                else:
                    # first atom in third position? Raise Error!
                    raise RuntimeError ('is -0 negative?')
            # Now the tricky part. If these atoms are not the same
            # atoms as in the dihedral term waiting, then it was not
            # supposed to buffed and should have been appended already
            elif buffed:  # dihedral term waiting
                if current_idxs != last_idxs:
                    # Append the buffed part (this_dihe still holds the
                    # dihedral created in a previous iteration).
                    if self._is_new_dihedral(dihe_out, this_dihe):
                        dihe_out.append(this_dihe)
                    # Create new dihedral term
                    this_dihe = parm_dihedral(mm1,mm2,mm3,mm4, pk,phase,pn)
                    # Now append if not buffed
                    if 0 < idx3:
                        if self._is_new_dihedral(dihe_out, this_dihe):
                            dihe_out.append(this_dihe)
                elif current_idxs == last_idxs:
                    # Add a term
                    this_dihe.add_term(mm1,mm2,mm3,mm4, pk,phase,pn)
                    if 0 < idx3:
                        # Finalize the dihedral by appending it
                        if self._is_new_dihedral(dihe_out, this_dihe):
                            dihe_out.append(this_dihe)
                # Stay/leave buffed state depending on this entry's flag
                buffed = idx3 < 0
        else:  # is Improper
            this_improper = parm_improper(
                mm1, mm2, mm3, mm4,
                pk, phase, pn)
            # Check consistency
            is_improper_new = True
            for existing_imp in impropers_out:
                if this_improper.has_same_atoms(existing_imp):
                    is_improper_new = False
                    if not this_improper.has_same_values(existing_imp):
                        print ('WARNING: INCONSISTENT parameters:')
                        print (this_improper.print_gaussian_way())
                        print (existing_imp.print_gaussian_way())
                    # dont break cycle to check all impropers
            # Append if new
            if is_improper_new:
                impropers_out.append(this_improper)
        last_idxs = current_idxs
    return dihe_out, impropers_out
def _retrieve_parm_angle(self):
    """Decode the prmtop angle tables into unique parm_angle objects.

    Angle entries come 4 ints at a time (3 atom pointers + parameter
    index); only angles whose three atoms are all in self.atom_sel_idx
    are kept. Duplicates (same atom types) are skipped, with a printed
    warning when their values disagree.

    :returns: list of parm_angle
    """
    n_atoms = self.read_flag('POINTERS')[0]
    # Read parms
    force_list = self.read_flag('ANGLE_FORCE_CONSTANT')
    equil_list = self.read_flag('ANGLE_EQUIL_VALUE')
    angles_inc_h = self.read_flag('ANGLES_INC_HYDROGEN')
    angles_not_h = self.read_flag('ANGLES_WITHOUT_HYDROGEN')
    #amber_atom_type = self.read_flag('AMBER_ATOM_TYPE')
    # All angles
    angles = angles_inc_h + angles_not_h
    angles_out = []
    n_angles = int(len(angles)/4)
    print ('Decrypting %d angles...' % (n_angles))
    for i in range(n_angles):
        # Atom pointers are coordinate-array offsets: divide by 3
        idx1 = int(angles[i*4+0]/3)  # atom 1 index
        idx2 = int(angles[i*4+1]/3)  # atom 2 index
        idx3 = int(angles[i*4+2]/3)
        if (idx1 in self.atom_sel_idx and
                idx2 in self.atom_sel_idx and
                idx3 in self.atom_sel_idx):
            angle_idx = angles[i*4+3]-1  # 1 indexed
            amber_type1 = self.upper_atom_types[idx1]
            amber_type2 = self.upper_atom_types[idx2]
            amber_type3 = self.upper_atom_types[idx3]
            force = force_list[angle_idx]
            equil = equil_list[angle_idx]
            this_angle = parm_angle(
                amber_type1, amber_type2, amber_type3, equil, force)
            # Verify if angle exists and conflicts
            is_angle_new = True
            for existing_angle in angles_out:
                if this_angle.has_same_atoms(existing_angle):
                    is_angle_new = False
                    if not this_angle.has_same_values(existing_angle):
                        print ('WARNING: INCONSISTENT parameters:')
                        print (this_angle.print_gaussian_way())
                        print (existing_angle.print_gaussian_way())
                    # dont break cycle to check all angles
            if is_angle_new:
                angles_out.append(this_angle)
    return angles_out
    def _gen_zmat(self, inpcrd_name):
        """Build the ONIOM atoms section (z-matrix style) for Gaussian.

        Coordinates come from *inpcrd_name*; names, charges and MM types
        from the prmtop flags. Only atoms in the current selection are
        written, all in layer 'L'.

        :param inpcrd_name: path to the AMBER inpcrd file with coordinates
        :return: job header plus one z-matrix line per selected atom
        """
        text = ''
        chargelist = self.read_flag('CHARGE')
        if 'ATOMIC_NUMBER' in self._flags: # old amboniom_elements.py (glycam)
            elements = self._guess_elements()
        else:
            elements = self._read_elements()
        # NOTE(review): _read_elements returns one entry per SELECTED atom,
        # while _guess_elements appears to return one entry per atom in the
        # prmtop; indexing with the selection counter j below looks wrong
        # for the _guess_elements branch when a sub-selection is used --
        # confirm against callers before changing.
        total_charge = 0
        X,Y,Z = self._coords_from_inpcrd(inpcrd_name)
        j = 0  # running index into the elements list
        for i in self.atom_sel_idx:
            # strip charge signs that can appear in PDB-style names
            name = self.pdb_atom_name[i].replace('-','').replace('+','')
            residue_name = self.pdb_resname[i].replace('-','').replace('+','')
            residue_number = self.pdb_resid[i]
            mm_type = self.upper_atom_types[i]
            # prmtop charges are scaled by 18.2223 (AMBER internal units);
            # divide to recover electron-charge units
            charge = chargelist[i] / 18.2223
            total_charge += charge
            mask = '0'
            x = X[i]
            y = Y[i]
            z = Z[i]
            chain = ''
            layer = 'L'
            element = elements[j]
            j += 1
            link_element = link_mm_type = link_bound_to = link_scale1 = None
            # create atom
            this_atom = atoms.Atom(element, (x, y, z))
            resinfo = atoms.RESinfo(name, residue_name, residue_number, chain)
            this_atom.set_resinfo(resinfo)
            oniominfo = atoms.Oniom(mask, layer)
            this_atom.set_oniom(oniominfo)
            mm = atoms.MM(mm_type, charge)
            this_atom.set_mm(mm)
            # print z-matrix for atom
            text += iolines.atom2zmat(this_atom)
        # Add stuff to text
        header = ''
        #header += '%nproc=4\n'
        #header += '%mem=2GB\n'
        #header += '%chk=chk.chk\n'
        header += '# amber=softonly geom=connectivity\n\n'
        # Gaussian job title line (runtime string; intentionally unchanged)
        header += 'Se amanha nao chover vai estar um lindo dia de sol\n\n'
        header += 'Sum of partial charges: %f\n' % (total_charge)
        return header + text
def _gen_connectivity(self):
print ('Decrypting connectivity from bonds...')
bonds_inc_h = self.read_flag('BONDS_INC_HYDROGEN')
bonds_not_h = self.read_flag('BONDS_WITHOUT_HYDROGEN')
bonds = bonds_inc_h + bonds_not_h
n_bonds = int(len(bonds)/3)
conn = [[] for i in range(len(self.atom_sel_idx))]
for i in range(n_bonds):
idx1 = int(bonds[i*3+0]/3) # atom 1 index
idx2 = int(bonds[i*3+1]/3) # atom 2 index
if idx1 in self.atom_sel_idx and idx2 in self.atom_sel_idx:
sorted_idx = [self.atom_sel_idx.index(idx1),
self.atom_sel_idx.index(idx2)]
sorted_idx.sort()
conn[sorted_idx[0]].append(sorted_idx[1])
# Sort within each atom
for atom in conn:
atom.sort()
# Print with style
text = ''
for i in range(len(conn)):
text += '%6d' % (i+1) # Atom number
already_there = []
for j in conn[i]:
if j not in already_there:
text += '%6d 1.0' % (j+1)
already_there.append(j)
text += '\n'
return text
    def _retrieve_parm_bond(self):
        """Decode bond parameters from the prmtop tables.

        Combines the BONDS_INC_HYDROGEN and BONDS_WITHOUT_HYDROGEN index
        tables, keeps only bonds where both atoms are selected, and
        de-duplicates by atom-type pair, warning about conflicting
        force/equilibrium values.

        :return: list of parm_bond objects (one per unique type pair)
        """
        n_atoms = self.read_flag('POINTERS')[0]
        # Read parms
        force_list = self.read_flag('BOND_FORCE_CONSTANT')
        equil_list = self.read_flag('BOND_EQUIL_VALUE')
        bonds_inc_h = self.read_flag('BONDS_INC_HYDROGEN')
        bonds_not_h = self.read_flag('BONDS_WITHOUT_HYDROGEN')
        # Get atom types
        # Will be change to self.atoms_retyped[vmd_sel]
        amber_atom_type = self.read_flag('AMBER_ATOM_TYPE')
        # All bonds: each bond is 3 ints (2 atom pointers + parameter index)
        bonds = bonds_inc_h + bonds_not_h
        bonds_out = []
        n_bonds = int(len(bonds)/3)
        print ('Decrypting %d bonds...' % (n_bonds))
        for i in range(n_bonds):
            # prmtop stores 3*atom_index; /3 gives the 0-based atom index
            idx1 = int(bonds[i*3+0]/3) # atom 1 index
            idx2 = int(bonds[i*3+1]/3) # atom 2 index
            if idx1 in self.atom_sel_idx and idx2 in self.atom_sel_idx:
                bond_idx = bonds[i*3+2]-1 # 1 indexed
                amber_type1 = self.upper_atom_types[idx1]
                amber_type2 = self.upper_atom_types[idx2]
                force = force_list[bond_idx]
                equil = equil_list[bond_idx]
                #print(amber_type1, amber_type2, force, equil)
                this_bond = parm_bond(
                    amber_type1, amber_type2, equil, force)
                # Verify if bond exists and conflicts
                is_bond_new = True
                for existing_bond in bonds_out:
                    if this_bond.has_same_atoms(existing_bond):
                        is_bond_new = False
                        if not this_bond.has_same_values(existing_bond):
                            print ('WARNING: INCONSISTENT parameters:')
                            print (this_bond.print_gaussian_way())
                            print (existing_bond.print_gaussian_way())
                if is_bond_new:
                    bonds_out.append(this_bond)
        return bonds_out
    def _gen_gaff_uppercase(self):
        """Return AMBER_ATOM_TYPE uppercased, renaming GAFF clashes.

        Gaussian treats atom types case-insensitively, so lowercase GAFF
        types (e.g. 'ca') that would collide with an uppercase AMBER type
        ('CA') after uppercasing are renamed to an unused two-letter code.
        Types starting with a digit are renamed unconditionally.

        :return: list of retyped, uppercase atom types (same order/length
            as the AMBER_ATOM_TYPE flag)
        """
        amber_type_list = self.read_flag('AMBER_ATOM_TYPE')
        amber = []
        others = []
        digitstart = []
        # Make list of uppercases (AMBER) and lowercases (GAFF)
        for atomtype in amber_type_list:
            if atomtype == atomtype.upper():
                if atomtype[0].isdigit():
                    if atomtype not in digitstart:
                        digitstart.append(atomtype)
                elif atomtype not in amber:
                    amber.append(atomtype)
            else:
                if atomtype[0].isdigit():
                    if atomtype not in digitstart:
                        digitstart.append(atomtype)
                elif atomtype not in others:
                    others.append(atomtype)
        # else:
        # print ('WHAT atomtype IS THIS??? ---> %s' % (atomtype))
        # raise RuntimeError ('Mixed lower/uppercase atom type')
        # Retypers and substitutes for second letter
        retype = {}
        alternative_list = []
        substitutes = 'JKXYZ89IV567FGHQRSTULW' # should be enough
        # rename lowercase types whose uppercase form collides with an
        # existing AMBER type: keep the first letter, try substitutes for
        # the second until an unused code is found
        for atomtype in others: #
            if atomtype.upper() in amber: # need to retype
                found_alternative = False
                for s in substitutes:
                    alternative = atomtype[0].upper() + s
                    if (alternative not in amber and
                        alternative not in others and
                        alternative not in alternative_list):
                        found_alternative = True
                        retype[atomtype] = alternative
                        alternative_list.append(alternative)
                        break
                if not found_alternative:
                    raise RuntimeError ('Could not retype %s' %(atomtype))
        # digit-leading types keep their SECOND character as the new
        # first letter (the digit cannot lead a Gaussian type name)
        for atomtype in digitstart: #
            found_alternative = False
            for s in substitutes:
                alternative = atomtype[1].upper() + s
                if (alternative not in amber and
                    alternative not in others and
                    alternative not in alternative_list):
                    found_alternative = True
                    retype[atomtype] = alternative
                    alternative_list.append(alternative)
                    break
            if not found_alternative:
                raise RuntimeError ('Could not retype %s' %(atomtype))
        if len(retype) > 0:
            print ('** lowercase amber atom types (GAFF) have been retyped:')
            print ('---------------')
            print ('original -> new')
            print ('---------------')
            for r in retype:
                print (' %2s %2s' % (r, retype[r]))
            print ('---------------')
            print ('\nNOTE:')
            print (' retyping link-atoms solves most missing parameters ;)\n')
        # return list of updated atom types
        new_amber_type = []
        for atomtype in amber_type_list:
            if atomtype in retype:
                new_amber_type.append(retype[atomtype])
            else: new_amber_type.append(atomtype.upper())
        return new_amber_type
    def _retrieve_vdw(self):
        """Extract Lennard-Jones (r_min/2, epsilon) per selected atom type.

        Looks up each type's self-pair in the nonbonded parameter matrix
        and converts the prmtop A/B coefficients (A/r^12 - B/r^6 form)
        into the r/epsilon convention Gaussian expects.

        :return: list of parm_vdw objects, one per selected atom type
        """
        # needs self.upper_atom_types and self.atom_sel_idx set up already
        type_index = self.read_flag('ATOM_TYPE_INDEX')
        n_atoms = self.read_flag('POINTERS')[0]
        N_types = 0 # nr of different atoms in prmtop (sel and non sel)
        IAC = {}
        # map each selected (retyped) atom type to its prmtop type index
        for i in range(n_atoms):
            upper = self.upper_atom_types[i]
            N_types = max(N_types, type_index[i])
            if (upper not in IAC) and (i in self.atom_sel_idx):
                IAC[upper] = type_index[i]
        # indexes to search in lennard jones coefs
        # of pairs of same atoms
        idx_list = self.read_flag('NONBONDED_PARM_INDEX')
        ICOs = []
        amber_types = []
        for upper in IAC:
            # -1 for 0 indexing
            ICOs.append(idx_list[N_types*(IAC[upper]-1)+IAC[upper]-1]-1)
            amber_types.append(upper) # ensures same order in following loops
        # Get A and B coefs of Lennard Jones
        acoef = self.read_flag('LENNARD_JONES_ACOEF')
        bcoef = self.read_flag('LENNARD_JONES_BCOEF')
        vdwlist = []
        for i in range(len(ICOs)):
            ico = ICOs[i]
            #print ('Atom: ', amber_types[i])
            a = acoef[ico]
            b = bcoef[ico]
            #print('coefs:',a,b)
            # calc r: for the self pair, r_min^6 = 2A/B and
            # epsilon = B^2/(4A); ea and eb are two equivalent ways to
            # compute epsilon, cross-checked below
            if b > 0:
                r_6 = (2*a/b)
                r = pow(r_6, 1.0/6)
                ea = b/(r_6*2)
                eb = a/(r_6**2)
                if round(ea,4) != round(eb,4):
                    raise RuntimeError (
                        'well depth different from coef A and B')
                R = r/2 # r = ri + rj
                E = ea # E = sqrt(ei*ej)
                vdwlist.append(parm_vdw(amber_types[i],R,E))
            else:
                #print ('WARNING: VDW is 0.0 for', amber_types[i])
                #print ('LENNARD_JONES_BCOEF was zero for pair %s-%s'
                # % (amber_types[i], amber_types[i]))
                # ASSIGN HERE
                vdwlist.append(parm_vdw(amber_types[i],0.0,0.0))
        return vdwlist
def _read_elements(self):
atom_names = self.read_flag('ATOM_NAME')
n_atoms = self.read_flag('POINTERS')[0]
special = {}
special['IP'] = 'Na'
special['IM'] = 'Cl'
special['Na'] = 'Na'
special['K'] = 'K'
special['C0'] = 'Ca'
special['Cl'] = 'Cl'
special['Cs'] = 'Cs'
special['MG'] = 'Mg'
special['Rb'] = 'Rb'
special['Zn'] = 'Zn'
special['F'] = 'F'
special['Br'] = 'Br'
special['I'] = 'I'
known = ['H','C','N','O','P','S']
elements = []
for i in range(n_atoms):
if i in self.atom_sel_idx:
element = atom_names[i]
if element in special:
elements.append(special[element])
elif element[0] in known:
elements.append(element[0])
else:
elements.append('?')
print ('Cannot guess element from atom_name %s' % (
atom_names[i][0]))
print ('Your %d atom will be : ? ' % (len(elements)))
return elements
def _guess_elements(self):
atomic_numbers = self.read_flag('ATOMIC_NUMBER')
periodic_table = {}
periodic_table[0] = 'X'
periodic_table[1] = 'H'
periodic_table[6] = 'C'
periodic_table[7] = 'N'
periodic_table[8] = 'O'
periodic_table[9] = 'F'
periodic_table[11] = 'Na'
periodic_table[12] = 'Mg'
periodic_table[15] = 'P'
periodic_table[16] = 'S'
periodic_table[17] = 'Cl'
periodic_table[19] = 'K'
periodic_table[20] = 'Ca'
periodic_table[25] = 'Mn'
periodic_table[-1] = 'Zn'
periodic_table[26] = 'Fe'
periodic_table[27] = 'Co'
periodic_table[28] = 'Ni'
periodic_table[29] = 'Cu'
periodic_table[30] = 'Zn'
periodic_table[35] = 'Br'
periodic_table[37] = 'Rb'
periodic_table[53] = 'I'
periodic_table[55] = 'Cs'
elements = [periodic_table[nr] for nr in atomic_numbers]
#for nr in atomic_numbers:
# elements.append(periodic_table[nr])
#print ('Sorry, atomic nr %d is not in the periodic table yet' % (nr))
return elements
def _coords_from_inpcrd(self, inpcrd_name):
x = []
y = []
z = []
f = open(inpcrd_name)
f.readline() # title line
n_atoms = int(f.readline().split()[0])
print ('Number of atoms:', n_atoms)
read_n_lines = int(math.ceil(float(n_atoms)/2))
for i in range(read_n_lines):
line = f.readline()
n_chunks = int(len(line)/36) # 3 coords * 12 digits
for j in range (n_chunks):
chunk = line[j*36:(j+1)*36]
x.append(float(chunk[ 0:12]))
y.append(float(chunk[12:24]))
z.append(float(chunk[24:36]))
f.close()
return x,y,z
    def gen_oniom(self, filename, inpcrd, notip3p, vmd_sel = ''): # default to all!
        """Write a complete Gaussian ONIOM input file for the selection.

        Sections are written in order: atoms (z-matrix), connectivity,
        then the AMBER force-field parameters (NonBon line, VDW, bonds,
        angles, optional hard-coded TIP3P water angle lines, dihedrals,
        impropers).

        :param filename: output path; overwritten if it exists
        :param inpcrd: AMBER inpcrd file providing the coordinates
        :param notip3p: when True, skip the TIP3P HW/OW angle lines
        :param vmd_sel: VMD-style atom selection; '' selects all atoms
        """
        # Open outfile (Force Overwrite)
        out = open(filename, 'w')
        # Make an atom selection
        self.atom_sel_idx = self.vmdsel(vmd_sel)
        # Retype lowercase atom types (GAFF)
        self.upper_atom_types = self._gen_gaff_uppercase()
        # Create QmmmAtomPdb list
        # Read coordinates from .inpcrd ?
        atoms_text = self._gen_zmat(inpcrd)
        out.write(atoms_text)
        out.write('\n')
        # Connectivity
        conn_text = self._gen_connectivity()
        out.write(conn_text)
        out.write('\n\n')
        # Call vdw, bonds, angles, dihedrals and impropers
        bonds = self._retrieve_parm_bond()
        angles = self._retrieve_parm_angle()
        dihedrals, impropers = self._retrieve_parm_dihedral()
        vdws = self._retrieve_vdw()
        # HrmBnd1 HW HW OW 0.00 0.00
        # HrmBnd1 HW OW HW 0.00 0.00
        # Amber non-bonded function
        out.write('NonBon 3 1 0 0 0.0 0.0 0.5 0.0 0.0 -1.2\n')
        for vdw in vdws:
            out.write(vdw.print_gaussian_way() + '\n')
        for bond in bonds:
            out.write(bond.print_gaussian_way() + '\n')
        for ang in angles:
            out.write(ang.print_gaussian_way() + '\n')
        # tip3p: rigid water angles carry zero force constants
        if not notip3p:
            out.write('HrmBnd1 HW HW OW 0.00 0.00\n')
            out.write('HrmBnd1 HW OW HW 0.00 0.00\n')
        for dih in dihedrals:
            out.write(dih.print_gaussian_way() + '\n')
        for imp in impropers:
            out.write(imp.print_gaussian_way() + '\n')
        out.write('\n\n\n')
        out.close()
class parm_vdw():
    """Van der Waals parameters (radius and well depth) for one atom type."""
    def __init__(self, atom, r, e):
        self.atom = atom
        self.r = r
        self.e = e
    def print_gaussian_way(self):
        """Return the 'VDW' parameter line in Gaussian Amber-FF format."""
        return ('VDW %2s %8.4f %8.4f' % (self.atom, self.r, self.e))
class parm_bond():
    """Harmonic bond parameters between two amber atom types."""
    def __init__(self, atom1, atom2, equil, force):
        self.atom1 = atom1
        self.atom2 = atom2
        self.equil = float(equil)
        self.force = float(force)
    def has_same_atoms(self, other):
        """True if *other* joins the same type pair, in either direction."""
        mine = (self.atom1, self.atom2)
        return (mine == (other.atom1, other.atom2) or
                mine == (other.atom2, other.atom1))
    def has_same_values(self, other):
        """True when force constant and equilibrium length both match."""
        return self.equil == other.equil and self.force == other.force
    def print_gaussian_way(self):
        """Return the 'HrmStr1' parameter line in Gaussian Amber-FF format."""
        return('HrmStr1 %2s %2s %6.2f %6.4f'
               % (self.atom1, self.atom2, self.force, self.equil))
class parm_angle():
    """Harmonic angle parameters between three amber atom types.

    The equilibrium angle is supplied in radians (as stored in prmtop)
    and kept internally in degrees.
    """
    def __init__(self, atom1, atom2, atom3, equil, force):
        self.atom1 = atom1
        self.atom2 = atom2
        self.atom3 = atom3
        self.equil = float(equil*180/math.pi)  # radians -> degrees
        self.force = float(force)
    def has_same_atoms(self, other):
        """True if *other* has the same center and the same (possibly
        swapped) outer types."""
        if self.atom2 != other.atom2:
            return False
        ends = (self.atom1, self.atom3)
        return (ends == (other.atom1, other.atom3) or
                ends == (other.atom3, other.atom1))
    def has_same_values(self, other):
        """True when force constant and equilibrium angle both match."""
        return self.equil == other.equil and self.force == other.force
    def print_gaussian_way(self):
        """Return the 'HrmBnd1' parameter line in Gaussian Amber-FF format."""
        return('HrmBnd1 %2s %2s %2s %6.2f %6.4f'
               % (self.atom1, self.atom2, self.atom3, self.force, self.equil))
class parm_dihedral():
    """Amber proper torsion with up to four Fourier terms (periods 1-4)."""
    def __init__(self, atom1, atom2, atom3, atom4,
                 force, phase, period,
                 idivf = 1): # >1 if general dihedral (*-CT-CT-*)
        """Create a torsion with a single (phase, force) term.

        :param idivf: term divider, 'NPaths' in Gaussian; >1 for generic
            dihedrals such as *-CT-CT-*
        """
        self.atom1 = atom1
        self.atom2 = atom2
        self.atom3 = atom3
        self.atom4 = atom4
        self.idivf = idivf # known as NPaths in Gaussian
        # one (phase, force) slot per periodicity 1..4, zeroed by default
        # (tuples are immutable, so the shared-literal list is safe)
        self.periods = [(0, 0.0)] * 4
        self.periods[int(period)-1] = (phase, force)
    def has_same_atoms(self, other):
        """True if *other* spans the same four types, forward or reversed."""
        mine = (self.atom1, self.atom2, self.atom3, self.atom4)
        theirs = (other.atom1, other.atom2, other.atom3, other.atom4)
        return mine == theirs or mine == theirs[::-1]
    def has_same_values(self, other):
        """True when all four (phase, force) terms match exactly."""
        return (self.periods == other.periods)
    def add_term(self, atom1, atom2, atom3, atom4,
                 force, phase, period):
        """Fill in the (phase, force) slot for *period*.

        Deliberately warns (instead of raising) when the given atoms do
        not match this dihedral's atoms, leaving the terms unchanged.
        """
        if (atom1, atom2, atom3, atom4) != (self.atom1, self.atom2,
                                            self.atom3, self.atom4):
            print ('Trying to add term to dihedral of different atoms')
            print ('has >',self.atom1,self.atom2,self.atom3,self.atom4)
            print ('try >', atom1, atom2, atom3, atom4)
        else:
            self.periods[int(period)-1] = (phase, force)
    def print_gaussian_way(self):
        """Return the 'AmbTrs' line (phases in degrees, truncated to int)."""
        phases = [term[0]*180/math.pi for term in self.periods]
        magnitudes = [term[1] for term in self.periods]
        return (
            'AmbTrs %2s %2s %2s %2s ' % (
                self.atom1, self.atom2, self.atom3, self.atom4) +
            '%3d %3d %3d %3d ' % tuple(phases) +
            '%7.3f%7.3f%7.3f%7.3f' % tuple(magnitudes) +
            '%4.1f'%(self.idivf))
class parm_improper():
    """Improper torsion (single Fourier term) between four amber atom types."""
    def __init__(self, atom1, atom2, atom3, atom4,
                 force, phase, period):
        # Set atoms
        self.atom1 = atom1
        self.atom2 = atom2
        self.atom3 = atom3
        self.atom4 = atom4
        # Set values
        self.force = force
        self.phase = float(phase*180/math.pi)  # radians -> degrees
        self.period = period
    def has_same_atoms(self, other):
        """True if *other* involves the same atoms, allowing the (1,2) and
        (3,4) pairs to swap places and to swap within each pair."""
        # Forward
        sp1 = (self.atom1, self.atom2)
        sp2 = (self.atom3, self.atom4)
        op1 = (other.atom1, other.atom2)
        op2 = (other.atom3, other.atom4)
        # Reverse
        or1 = (other.atom2, other.atom1)
        or2 = (other.atom4, other.atom3)
        # Match
        cis = (sp1==op1 or sp1==or1) and (sp2==op2 or sp2==or2)
        trans=(sp1==op2 or sp1==or2) and (sp2==op1 or sp2==or1)
        # BUGFIX: removed an unreachable second 'return' that referenced
        # undefined names (midcis/midtrans), left over from a copy-paste
        # of parm_dihedral.has_same_atoms.
        return (cis or trans)
    def has_same_values(self, other):
        """True when period, force constant and phase all match."""
        return (self.period == other.period and
                self.force == other.force and
                self.phase == other.phase)
    def print_gaussian_way(self):
        """Return the 'ImpTrs' parameter line in Gaussian Amber-FF format."""
        return (
            'ImpTrs %2s %2s %2s %2s' % (
                self.atom1, self.atom2, self.atom3, self.atom4) +
            '%6.1f%7.1f%4.1f' % (
                self.force, self.phase, self.period))
|
eduardoftoliveira/oniomMacGyver
|
omg/prmtop.py
|
Python
|
gpl-3.0
| 36,847
|
[
"Amber",
"Gaussian"
] |
12dfa3688a5ececf2b1aff3cf402cd18aa584647922942e935c134eed22e5eed
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
@utx.skipIfMissingFeatures(["ELECTROSTATICS", "EXTERNAL_FORCES"])
class test_icc(ut.TestCase):
    """Tests for the ICC* (induced charge computation) electrostatics
    extension: parameter validation and a physical dipole-between-plates
    sanity check."""
    system = espressomd.System(box_l=[10, 10, 10])
    def tearDown(self):
        # remove actors and particles so each test starts from a clean box
        self.system.actors.clear()
        self.system.part.clear()
    def add_icc_particles(self, side_num_particles,
                          initial_charge, z_position):
        """Place a square grid of fixed ICC surface particles.

        :param side_num_particles: particles per box side (grid is n x n)
        :param initial_charge: charge assigned to every grid particle
        :param z_position: common z coordinate of the plane
        :return: (particle slice, normals array, areas array)
        """
        number = side_num_particles**2
        # equal share of the xy cross-section per particle
        areas = self.system.box_l[0] * \
            self.system.box_l[1] / number * np.ones(number)
        normals = np.zeros((number, 3))
        normals[:, 2] = 1
        x_position = np.linspace(
            0,
            self.system.box_l[0],
            side_num_particles,
            endpoint=False)
        y_position = np.linspace(
            0,
            self.system.box_l[1],
            side_num_particles,
            endpoint=False)
        x_pos, y_pos = np.meshgrid(x_position, y_position)
        positions = np.stack((x_pos, y_pos, np.full_like(
            x_pos, z_position)), axis=-1).reshape(-1, 3)
        charges = np.full(number, initial_charge)
        fix = [(True, True, True)] * number
        return self.system.part.add(
            pos=positions, q=charges, fix=fix), normals, areas
    def common_setup(self, kwargs, error):
        """Build a valid ICC setup, override with *kwargs*, and assert that
        adding the actor raises an exception matching *error*."""
        from espressomd.electrostatic_extensions import ICC
        self.tearDown()
        part_slice, normals, areas = self.add_icc_particles(2, 0.01, 0)
        params = {"n_icc": len(part_slice),
                  "normals": normals,
                  "areas": areas,
                  "epsilons": np.ones_like(areas),
                  "first_id": part_slice.id[0],
                  "check_neutrality": False}
        params.update(kwargs)
        icc = ICC(**params)
        with self.assertRaisesRegex(Exception, error):
            self.system.actors.add(icc)
    def test_params(self):
        # each invalid parameter must be rejected with a specific message
        params = [({"n_icc": -1}, 'ICC: invalid number of particles'),
                  ({"first_id": -1}, 'ICC: invalid first_id'),
                  ({"max_iterations": -1}, 'ICC: invalid max_iterations'),
                  ({"convergence": -1}, 'ICC: invalid convergence value'),
                  ({"relaxation": -1}, 'ICC: invalid relaxation value'),
                  ({"relaxation": 2.1}, 'ICC: invalid relaxation value'),
                  ({"eps_out": -1}, 'ICC: invalid eps_out'),
                  ({"ext_field": 0}, 'A single value was given but 3 were expected'), ]
        for kwargs, error in params:
            self.common_setup(kwargs, error)
    def test_core_params(self):
        # parameters passed in must round-trip unchanged through the core
        from espressomd.electrostatic_extensions import ICC
        self.tearDown()
        part_slice, normals, areas = self.add_icc_particles(5, 0.01, 0)
        params = {"n_icc": len(part_slice),
                  "normals": normals,
                  "areas": areas,
                  "epsilons": np.ones_like(areas),
                  "first_id": part_slice.id[0],
                  "check_neutrality": False}
        icc = ICC(**params)
        self.system.actors.add(icc)
        icc_params = icc.get_params()
        for key, value in params.items():
            np.testing.assert_allclose(value, np.copy(icc_params[key]))
    @utx.skipIfMissingFeatures(["P3M"])
    def test_dipole_system(self):
        """A dipole between two high-permittivity ICC planes should induce
        an equal and opposite image dipole on the plates."""
        from espressomd.electrostatics import P3M
        from espressomd.electrostatic_extensions import ICC
        BOX_L = 20.
        BOX_SPACE = 5.
        self.tearDown()
        self.system.box_l = [BOX_L, BOX_L, BOX_L + BOX_SPACE]
        self.system.cell_system.skin = 0.4
        self.system.time_step = 0.01
        N_ICC_SIDE_LENGTH = 10
        DIPOLE_DISTANCE = 5.0
        DIPOLE_CHARGE = 10.0
        part_slice_lower, normals_lower, areas_lower = self.add_icc_particles(
            N_ICC_SIDE_LENGTH, -0.0001, 0.)
        part_slice_upper, normals_upper, areas_upper = self.add_icc_particles(
            N_ICC_SIDE_LENGTH, 0.0001, BOX_L)
        # ICC requires a contiguous particle-id range
        assert (part_slice_upper.id[-1] - part_slice_lower.id[0] +
                1) == 2 * N_ICC_SIDE_LENGTH**2, "ICC particles not continuous"
        # upper plate's normal points inward (towards the dipole)
        normals = np.vstack((normals_lower, -normals_upper))
        areas = np.hstack((areas_lower, areas_upper))
        # very large epsilon approximates a metallic boundary
        epsilons = np.full_like(areas, 1e8)
        sigmas = np.zeros_like(areas)
        icc = ICC(n_icc=2 * N_ICC_SIDE_LENGTH**2,
                  normals=normals,
                  areas=areas,
                  epsilons=epsilons,
                  sigmas=sigmas,
                  convergence=1e-6,
                  max_iterations=100,
                  first_id=part_slice_lower.id[0],
                  eps_out=1.,
                  relaxation=0.75,
                  ext_field=[0, 0, 0])
        # Dipole in the center of the simulation box
        BOX_L_HALF = BOX_L / 2
        self.system.part.add(pos=[BOX_L_HALF, BOX_L_HALF, BOX_L_HALF - DIPOLE_DISTANCE / 2],
                             q=DIPOLE_CHARGE, fix=[True, True, True])
        self.system.part.add(pos=[BOX_L_HALF, BOX_L_HALF, BOX_L_HALF + DIPOLE_DISTANCE / 2],
                             q=-DIPOLE_CHARGE, fix=[True, True, True])
        p3m = P3M(prefactor=1, mesh=32, cao=7, accuracy=1e-5)
        self.system.actors.add(p3m)
        self.system.actors.add(icc)
        self.system.integrator.run(0)
        charge_lower = sum(part_slice_lower.q)
        charge_upper = sum(part_slice_upper.q)
        testcharge_dipole = DIPOLE_CHARGE * DIPOLE_DISTANCE
        induced_dipole = 0.5 * (abs(charge_lower) + abs(charge_upper)) * BOX_L
        # induced dipole must cancel the test dipole to ~4 decimals
        self.assertAlmostEqual(1, induced_dipole / testcharge_dipole, places=4)
# allow running this test file directly with the unittest runner
if __name__ == "__main__":
    ut.main()
|
fweik/espresso
|
testsuite/python/icc.py
|
Python
|
gpl-3.0
| 6,437
|
[
"ESPResSo"
] |
6f5d85b3db16d3c41a2de4fcc37e48a52b660bf62ef07d4d4234a520e089a851
|
from cvxopt import matrix, lapack, spmatrix
from chompack.symbolic import cspmatrix
from chompack.misc import frontal_get_update
from cvxopt import sqrt
def mrcompletion(A, reordered=True):
    r"""
    Minimum rank positive semidefinite completion. The routine takes a
    positive semidefinite cspmatrix :math:`A` and returns a dense
    matrix :math:`Y` with :math:`r` columns that satisfies

    .. math::

        P( YY^T ) = A

    where

    .. math::

        r = \max_{i} |\gamma_i|

    is the clique number (the size of the largest clique).

    :param A: :py:class:`cspmatrix`
    :param reordered: boolean; when False, rows of Y are permuted back
        to the natural (pre-fill-in) ordering
    """
    assert isinstance(A, cspmatrix) and A.is_factor is False, "A must be a cspmatrix"
    # shorthand references into the symbolic factorization / clique tree
    symb = A.symb
    n = symb.n
    snpost = symb.snpost
    snptr = symb.snptr
    chptr = symb.chptr
    chidx = symb.chidx
    relptr = symb.relptr
    relidx = symb.relidx
    blkptr = symb.blkptr
    blkval = A.blkval
    stack = []
    r = 0
    # work arrays are all sized by the largest clique
    maxr = symb.clique_number
    Y = matrix(0.0,(n,maxr)) # storage for factorization
    Z = matrix(0.0,(maxr,maxr)) # storage for EVD of cliques
    w = matrix(0.0,(maxr,1)) # storage for EVD of cliques
    P = matrix(0.0,(maxr,maxr)) # storage for left singular vectors
    Q1t = matrix(0.0,(maxr,maxr)) # storage for right singular vectors (1)
    Q2t = matrix(0.0,(maxr,maxr)) # storage for right singular vectors (2)
    S = matrix(0.0,(maxr,1)) # storage for singular values
    V = matrix(0.0,(maxr,maxr))
    Ya = matrix(0.0,(maxr,maxr))
    # visit supernodes in reverse topological order
    for k in range(symb.Nsn-1,-1,-1):
        nn = snptr[k+1]-snptr[k] # |Nk|
        na = relptr[k+1]-relptr[k] # |Ak|
        nj = na + nn
        # allocate F and copy X_{Jk,Nk} to leading columns of F
        F = matrix(0.0, (nj,nj))
        lapack.lacpy(blkval, F, offsetA = blkptr[k], ldA = nj, m = nj, n = nn, uplo = 'L')
        # if supernode k is not a root node:
        if na > 0:
            # copy Vk to 2,2 block of F
            Vk = stack.pop()
            lapack.lacpy(Vk, F, offsetB = nn*nj+nn, m = na, n = na, uplo = 'L')
        # if supernode k has any children: push their update matrices
        for ii in range(chptr[k],chptr[k+1]):
            stack.append(frontal_get_update(F,relidx,relptr,chidx[ii]))
        # Compute factorization of F (eigendecomposition of the frontal matrix)
        lapack.syevr(F, w, jobz='V', range='A', uplo='L', Z=Z, n=nj,ldZ=maxr)
        # numerical rank: eigenvalues above a relative threshold
        rk = sum([1 for wi in w[:nj] if wi > 1e-14*w[nj-1]]) # determine rank of clique k
        r = max(rk,r) # update rank
        # Scale last rk cols of Z (eigenvectors * sqrt(eigenvalue)) and
        # copy the supernodal rows into Y
        for j in range(nj-rk,nj):
            Z[:nj,j] *= sqrt(w[j])
        In = symb.snrowidx[symb.sncolptr[k]:symb.sncolptr[k]+nn]
        Y[In,:rk] = Z[:nn,nj-rk:nj]
        # if supernode k is not a root node:
        if na > 0:
            # Extract data for the separator rows
            Ia = symb.snrowidx[symb.sncolptr[k]+nn:symb.sncolptr[k+1]]
            Ya[:na,:r] = Y[Ia,:r]
            V[:na,:rk] = Z[nn:nj,nj-rk:nj]
            V[:na,rk:r] *= 0.0
            # Compute SVDs: V = P*S*Q1t and Ya = P*S*Q2t
            lapack.gesvd(V,S,jobu='A',jobvt='A',U=P,Vt=Q1t,ldU=maxr,ldVt=maxr,m=na,n=r,ldA=maxr)
            lapack.gesvd(Ya,S,jobu='N',jobvt='A',Vt=Q2t,ldVt=maxr,m=na,n=r,ldA=maxr)
            # Scale Q2t (only singular directions above threshold)
            for i in range(min(na,rk)):
                if S[i] > 1e-14*S[0]: Q2t[i,:r] = P[:na,i].T*Y[Ia,:r]/S[i]
            # Scale Yn: rotate the new columns to agree with the parent's
            Y[In,:r] = Y[In,:r]*Q1t[:r,:r].T*Q2t[:r,:r]
    if reordered:
        return Y[:,:r]
    else:
        return Y[symb.ip,:r]
|
cvxopt/chompack
|
src/python/pybase/mrcompletion.py
|
Python
|
gpl-3.0
| 3,728
|
[
"VisIt"
] |
3e4b6a4b8407baa997e5ec41f469157777192bb8b7ef915ff2318602e823d3c7
|
""" Module for handling AccountingDB tables on multiple DBs (e.g. 2 MySQL servers)
"""
from DIRAC import gConfig, S_OK, gLogger
from DIRAC.Core.Utilities.Plotting.TypeLoader import TypeLoader
from DIRAC.AccountingSystem.DB.AccountingDB import AccountingDB
class MultiAccountingDB(object):
    """Dispatch AccountingDB operations across multiple database backends.

    A default AccountingDB is always created; the CS section at *csPath*
    may map individual accounting types to additional databases. Method
    calls are forwarded either to the type's database (type-aware
    methods) or to every database (maintenance methods).
    """
    def __init__(self, csPath, readOnly=False):
        self.__csPath = csPath
        self.__readOnly = readOnly
        self.__dbByType = {}  # accounting type -> db name
        self.__defaultDB = "AccountingDB/AccountingDB"
        self.__log = gLogger.getSubLogger("MultiAccDB")
        self.__generateDBs()
        self.__registerMethods()
    def __generateDBs(self):
        """Create the default DB plus any per-type DBs declared in the CS.

        :raises RuntimeError: when the CS maps an unknown accounting type
        """
        self.__log.notice("Creating default AccountingDB...")
        self.__allDBs = {self.__defaultDB: AccountingDB(readOnly=self.__readOnly)}
        types = self.__allDBs[self.__defaultDB].getRegisteredTypes()
        result = gConfig.getOptionsDict(self.__csPath)
        if not result["OK"]:
            gLogger.verbose("No extra databases defined", "in %s" % self.__csPath)
            return
        validTypes = TypeLoader().getTypes()
        opts = result["Value"]
        for acType in opts:
            if acType not in validTypes:
                msg = "(%s defined in %s)" % (acType, self.__csPath)
                self.__log.fatal("Not a known accounting type", msg)
                raise RuntimeError(msg)
            dbName = opts[acType]
            gLogger.notice("Type will be assigned", "(%s to %s)" % (acType, dbName))
            if dbName not in self.__allDBs:
                fields = dbName.split("/")
                if len(fields) == 1:
                    # bare name: assume the Accounting system section
                    dbName = "Accounting/%s" % dbName
                gLogger.notice("Creating DB", "%s" % dbName)
                self.__allDBs[dbName] = AccountingDB(dbName, readOnly=self.__readOnly)
            self.__dbByType[acType] = dbName
    def __registerMethods(self):
        """Attach forwarding methods on the instance.

        The first group is type-aware (routed to one DB via
        __mimeTypeMethod); the second runs on every DB (__mimeMethod).
        The lambda-closure wrapper pins each methodName at definition
        time (avoids Python's late-binding-in-loop pitfall).
        """
        for methodName in (
            "registerType",
            "changeBucketsLength",
            "regenerateBuckets",
            "deleteType",
            "insertRecordThroughQueue",
            "deleteRecord",
            "getKeyValues",
            "retrieveBucketedData",
            "calculateBuckets",
            "calculateBucketLengthForTime",
        ):
            (
                lambda closure: setattr(
                    self,
                    closure,
                    lambda *x: self.__mimeTypeMethod(closure, *x),  # pylint: disable=no-value-for-parameter
                )
            )(methodName)
        for methodName in (
            "autoCompactDB",
            "compactBuckets",
            "markAllPendingRecordsAsNotTaken",
            "loadPendingRecords",
            "getRegisteredTypes",
        ):
            (lambda closure: setattr(self, closure, lambda *x: self.__mimeMethod(closure, *x)))(methodName)
    def __mimeTypeMethod(self, methodName, setup, acType, *args):
        # route to the DB owning acType; table names are "<setup>_<type>"
        return getattr(self.__db(acType), methodName)("%s_%s" % (setup, acType), *args)
    def __mimeMethod(self, methodName, *args):
        # run on every DB; return the last failure (or S_OK if all passed)
        end = S_OK()
        for dbName in self.__allDBs:
            res = getattr(self.__allDBs[dbName], methodName)(*args)
            if res and not res["OK"]:
                end = res
        return end
    def __db(self, acType):
        # the DB assigned to acType, falling back to the default DB
        return self.__allDBs[self.__dbByType.get(acType, self.__defaultDB)]
    def insertRecordBundleThroughQueue(self, records):
        """Group *records* by accounting type and bulk-insert each group
        into its database.

        :param records: iterables of (setup, type, startTime, endTime, values)
        :return: S_OK, or the last S_ERROR encountered
        """
        recByType = {}
        for record in records:
            acType = record[1]
            if acType not in recByType:
                recByType[acType] = []
            recByType[acType].append(("%s_%s" % (record[0], record[1]), record[2], record[3], record[4]))
        end = S_OK()
        for acType in recByType:
            res = self.__db(acType).insertRecordBundleThroughQueue(recByType[acType])
            if not res["OK"]:
                end = res
        return end
|
DIRACGrid/DIRAC
|
src/DIRAC/AccountingSystem/DB/MultiAccountingDB.py
|
Python
|
gpl-3.0
| 3,895
|
[
"DIRAC"
] |
d357f713aa49a3201f2cce1a8e8192236cfe8048d3df025ec77ec3b40a9dfdf5
|
import os
import re
import json
import pytz
import numpy as np
from datetime import datetime
import progressbar
from progressbar import ProgressBar
from lib.util import open_log_file
re_timestamp = re.compile(
r"[0-9\-]{10}[\sT][0-9]{2}:[0-9]{2}:[0-9]{2}[\.\,0-9]*[\+\-0-9Z]*")
dt_formats = ["%Y-%m-%d %H:%M:%S,%f%z", "%Y-%m-%d %H:%M:%S%z"]
def parse_date_time(line, time_zone):
    """Extract a timestamp from a log line and return it as UTC unix time.

    :param line: log line, possibly containing a timestamp matching
        re_timestamp
    :param time_zone: UTC-offset suffix (e.g. '+0000') appended when the
        matched timestamp carries no zone of its own
    :return: POSIX timestamp (float, UTC), or 0 when no timestamp was
        recognized or it fits none of the known formats
    """
    dt = re_timestamp.search(line)
    if dt is None:
        return 0
    dt = dt.group(0)
    # normalize towards the '%Y-%m-%d %H:%M:%S,%f%z' shape
    dt = dt.replace('T', ' ')
    dt = dt.replace('Z', '+0000')
    dt = dt.replace('.', ',')
    time_part = dt.partition(' ')[2]
    # for "2017-05-12 03:23:31,135-04" format: pad 2-digit offset to 4
    if (any([sign in time_part
             and len(time_part.partition(sign)[2]) == 2
             for sign in ['+', '-']])):
        dt += '00'
    elif not any([sign in time_part for sign in ['+', '-']]):
        # if we have time without time zone, append the caller's default
        dt += time_zone
    date_time = ''
    for dt_format in dt_formats:
        try:
            date_time = datetime.strptime(dt, dt_format)
            date_time = date_time.astimezone(pytz.utc)
            date_time = date_time.timestamp()
            break
        except ValueError:
            continue
    if date_time == '':
        return 0
    return date_time
def find_time_range(output_descriptor, log_directory, files, tz_info,
                    time_range_info):
    """Find each log file's first/last timestamps and filter by time range.

    For every file the first parseable timestamp (scanning forward) and
    the last one (scanning backward from EOF) are collected; files whose
    span does not overlap any requested range are reported and excluded.

    :param output_descriptor: writable stream for progress and warnings
    :param log_directory: directory of the logs (kept for API compatibility)
    :param files: list of log file paths
    :param tz_info: per-file default timezone suffix (parallel to *files*)
    :param time_range_info: list of [start, end] unix-time pairs; an empty
        list means no restriction
    :return: (logs_datetimes, relevant_logs) where logs_datetimes maps
        file -> [first_ts, last_ts] and relevant_logs lists files
        overlapping the requested range
    """
    logs_datetimes = {}
    relevant_logs = []
    for log_idx, log in enumerate(files):
        f = open_log_file(log)
        if f is None:
            output_descriptor.write("Unknown file extension: %s" % log)
            continue
        # first timestamp: read forward until a line parses
        first_dt = 0
        while first_dt == 0:
            line = f.readline()
            if not line:
                break
            first_dt = parse_date_time(line, tz_info[log_idx])
        if first_dt == 0:
            output_descriptor.write("Log file time format not recognized: "
                                    "%s\n" % log)
            continue
        output_descriptor.write("Reading time ranges of %s\n" % (log,))
        # last timestamp: step backwards from EOF one line at a time
        f.seek(0, os.SEEK_END)
        file_len = f.tell()
        offset = 1
        last_dt = 0
        while last_dt == 0:
            while f.read(1) != "\n":
                offset += 1
                if offset > file_len:
                    f.seek(0, os.SEEK_SET)
                    break
                f.seek(file_len - offset, os.SEEK_SET)
            last_dt = parse_date_time(f.readline(), tz_info[log_idx])
        logs_datetimes[log] = [first_dt, last_dt]
        if (logs_datetimes[log][1] < logs_datetimes[log][0]):
            output_descriptor.write(('Warning: %s - end datetime (%s) is ' +
                                     'less than start time (%s)\n') %
                                    (log, datetime.utcfromtimestamp(
                                        logs_datetimes[log][1]).strftime(
                                        "%Y-%m-%dT%H:%M:%S,%f")[:-3],
                                        datetime.utcfromtimestamp(
                                        logs_datetimes[log][0]).strftime(
                                        "%Y-%m-%dT%H:%M:%S,%f")[:-3]))
            output_descriptor.write('\n')
        elif (time_range_info != []
                and (all([logs_datetimes[log][0] > tr[1]
                          for tr in time_range_info])
                     or all([logs_datetimes[log][1] < tr[0]
                             for tr in time_range_info]))):
            # BUGFIX: show the file's START and END timestamps; the
            # original printed the end timestamp ([1]) in both slots.
            output_descriptor.write(('Warning: log file "%s" (%s %s) is ' +
                                     'not in the defined time range') %
                                    (log, datetime.utcfromtimestamp(
                                        logs_datetimes[log][0]).strftime(
                                        "%Y-%m-%dT%H:%M:%S,%f")[:-3],
                                        datetime.utcfromtimestamp(
                                        logs_datetimes[log][1]).strftime(
                                        "%Y-%m-%dT%H:%M:%S,%f")[:-3]))
            output_descriptor.write('\n')
        else:
            relevant_logs += [log]
    if relevant_logs == []:
        output_descriptor.write('All log files are outside the defined ' +
                                'time range\n')
        return logs_datetimes, relevant_logs
    if len(relevant_logs) == 1:
        return logs_datetimes, relevant_logs
    # warn when some log does not come within an hour of any other log
    for log in logs_datetimes.keys():
        if (sum([logs_datetimes[log][0] - logs_datetimes[log2][1] < 3600
                 for log2 in logs_datetimes.keys()]) == 1):
            output_descriptor.write('\nWarning: log files does not cross by ' +
                                    'date time\n\n')
    return logs_datetimes, relevant_logs
def find_needed_linenum(output_descriptor, log_directory, files, tz_info,
                        time_range_info):
    """Locate, for every log file, the byte-offset window covering each
    requested time range.

    For each file in ``files`` and each ``[start, end]`` pair in
    ``time_range_info`` a binary search over the file's raw bytes finds
    the positions whose line timestamps bracket the range.  Offsets (not
    line numbers, despite the name) are returned so callers can seek
    directly.

    Returns a dict mapping log filename -> list of [left_offset,
    right_offset] pairs, one per time range; when ``time_range_info`` is
    empty the whole file ([0, file_len]) is used.
    """
    needed_linenum = {}
    for log_idx, log in enumerate(files):
        f = open_log_file(log)
        if f is None:
            output_descriptor.write("Unknown file extension: %s" % log)
            continue
        needed_linenum[log] = []
        if time_range_info == []:
            # No filtering requested: the whole file is "needed".
            f.seek(0, os.SEEK_END)
            needed_linenum[log] += [[0, f.tell()]]
            continue
        for tr_idx in range(len(time_range_info)):
            f.seek(0, os.SEEK_SET)
            needed_time = time_range_info[tr_idx][0]
            # Skip forward to the first line with a parsable timestamp.
            dt = 0
            while dt == 0:
                cur_pos = f.tell()
                dt = parse_date_time(f.readline(), tz_info[log_idx])
            cur_time = dt
            prev_time = dt
            f.seek(0, os.SEEK_END)
            file_len = f.tell()
            # Walk backwards from EOF to the last parsable timestamp.
            dt = 0
            offset = 1
            while dt == 0:
                while f.read(1) != "\n":
                    offset += 1
                    f.seek(file_len-offset, os.SEEK_SET)
                cur_pos = f.tell()
                dt = parse_date_time(f.readline(), tz_info[log_idx])
            # Binary search for the byte position of the range start.
            prev_pos = 0
            cur_pos = 0
            next_pos = file_len//2
            condition = False
            was_found = False
            while not (was_found and condition):
                # Rewind to the beginning of the line containing next_pos.
                f.seek(next_pos, os.SEEK_SET)
                offset = 1
                while f.read(1) != "\n" and next_pos-offset >= 0:
                    f.seek(next_pos-offset, os.SEEK_SET)
                    offset += 1
                prev_pos = cur_pos
                dt = 0
                while dt == 0:
                    cur_pos = f.tell()
                    dt = parse_date_time(f.readline(), tz_info[log_idx])
                prev_time = cur_time
                cur_time = dt
                if cur_time >= needed_time:
                    next_pos = cur_pos - (cur_pos - prev_pos)//2
                else:
                    next_pos = prev_pos - (prev_pos - cur_pos)//2
                # Stop once needed_time lies between the two probed
                # timestamps (either direction) or the search converged.
                condition = (prev_time <= needed_time) \
                    and (cur_time > needed_time) \
                    or (prev_time > needed_time) \
                    and (cur_time <= needed_time) \
                    or (cur_time == prev_time)
                if condition:
                    was_found = True
                if cur_time != prev_time:
                    condition = False
            border_left = min(prev_pos, cur_pos)
            # end-range
            needed_time = time_range_info[tr_idx][1]
            cur_time = dt
            prev_time = dt
            f.seek(0, os.SEEK_END)
            file_len = f.tell()
            dt = 0
            offset = 1
            while dt == 0:
                while f.read(1) != "\n":
                    offset += 1
                    f.seek(file_len-offset, os.SEEK_SET)
                cur_pos = f.tell()
                dt = parse_date_time(f.readline(), tz_info[log_idx])
            # Second binary search for the range end, starting no earlier
            # than the left border found above.
            prev_pos = border_left
            cur_pos = border_left
            next_pos = file_len//2
            condition = False
            was_found = False
            while not (was_found and condition):
                f.seek(next_pos, os.SEEK_SET)
                offset = 1
                # NOTE(review): here offset is bumped BEFORE the seek,
                # unlike the start-range loop above which seeks before
                # bumping — confirm the asymmetry is intentional.
                while f.read(1) != "\n" and next_pos-offset >= 0:
                    offset += 1
                    f.seek(next_pos-offset, os.SEEK_SET)
                prev_pos = cur_pos
                dt = 0
                while dt == 0:
                    cur_pos = f.tell()
                    dt = parse_date_time(f.readline(), tz_info[log_idx])
                prev_time = cur_time
                cur_time = dt
                if cur_time >= needed_time:
                    next_pos = cur_pos - (cur_pos - prev_pos)//2
                else:
                    next_pos = prev_pos - (prev_pos - cur_pos)//2
                condition = (prev_time < needed_time) \
                    and (cur_time >= needed_time) \
                    or (prev_time >= needed_time) \
                    and (cur_time < needed_time) \
                    or (cur_time == prev_time)
                if condition:
                    was_found = True
                if cur_time != prev_time:
                    condition = False
            border_right = max(prev_pos, cur_pos)
            needed_linenum[log] += [[border_left, border_right]]
    return needed_linenum
def libvirtd_vm_host(f, filename, pos, tz_info, vms, hosts,
                     time_range_info):
    """Scan a libvirtd log for VM/host identity information.

    Reads the open file ``f`` starting at byte ``pos[0]``, collecting
    <name>/<uuid>/<hostname>/<hostuuid> XML fragments that span several
    lines (tracked via the ``multiline`` flag) plus single-line
    "(VM: name=..., uuid=...)" records.  ``vms`` and ``hosts`` are
    mutated in place: vm_name -> {'id', 'hostids'} sets and
    host_name -> {'id', 'vmids'} sets.

    Returns (vms, hosts, real_firstpos, real_lastpos) where the last two
    are the byte offsets of the first line inside the time range and the
    first line past it.
    """
    cur = {}
    f.seek(0, os.SEEK_END)
    file_len = f.tell()
    multiline = False
    f.seek(pos[0], os.SEEK_SET)
    # Advance to the first timestamped line at or after the range start.
    dt = 0
    c = 0
    while dt < time_range_info[0]:
        c += 1
        dt = 0
        while dt == 0:
            real_firstpos = f.tell()
            dt = parse_date_time(f.readline(), tz_info)
    f.seek(0, os.SEEK_SET)
    widget_style = [filename + ':', progressbar.Percentage(), ' (',
                    progressbar.SimpleProgress(), ')', ' ',
                    progressbar.Bar(), ' ', progressbar.Timer()]
    bar = ProgressBar(widgets=widget_style, max_value=file_len)
    # i = real_firstpos
    i = 0
    real_lastpos = file_len
    bar.update(i)
    for line_num, line in enumerate(f):
        i += len(line)
        bar.update(i)
        dt = parse_date_time(line, tz_info)
        if dt == 0:
            continue
        # Remember where the range ends but keep scanning (no break) so
        # identity data appearing later is still collected.
        if dt >= time_range_info[1] and real_lastpos == file_len:
            real_lastpos = i + real_firstpos
            # break
        vm_name = re.search(r'\<name\>(.+?)\<\/name\>', line)
        if (not multiline and vm_name is not None):
            # Start of a multi-line XML domain description.
            multiline = True
            cur['vm_name'] = vm_name.group(1)
            continue
        vm_id = re.search(r'\<uuid\>(.+?)\<\/uuid\>', line)
        if (multiline and vm_id is not None):
            cur['vm_id'] = vm_id.group(1)
            continue
        host_name = re.search(r'\<hostname\>(.+?)\<\/hostname\>', line)
        if (multiline and host_name is not None):
            cur['host_name'] = host_name.group(1)
            continue
        host_id = re.search(r'\<hostuuid\>(.+?)\<\/hostuuid\>', line)
        if (multiline and host_id is not None):
            # Complete record: flush accumulated vm/host identity.
            cur['host_id'] = host_id.group(1)
            if (cur['vm_name'] not in vms.keys()):
                vms[cur['vm_name']] = {'id': set(), 'hostids': set()}
            vms[cur['vm_name']]['id'].add(cur['vm_id'])
            vms[cur['vm_name']]['hostids'].add(cur['host_id'])
            if (cur['host_name'] not in hosts.keys()):
                hosts[cur['host_name']] = {'id': set(), 'vmids': set()}
            hosts[cur['host_name']]['id'].add(cur['host_id'])
            hosts[cur['host_name']]['vmids'].add(cur['vm_id'])
            multiline = False
            cur = {}
            continue
        if multiline:
            # host was not found
            if (cur != {} and 'vm_name' in cur.keys() and
                    'vm_id' in cur.keys()):
                if (cur['vm_name'] not in vms.keys()):
                    vms[cur['vm_name']] = {'id': set(), 'hostids': set()}
                # NOTE(review): adds vm_name to the 'id' set (not
                # cur['vm_id']) — confirm this is intended.
                vms[cur['vm_name']]['id'].add(cur['vm_name'])
            multiline = False
            cur = {}
            continue
        # Other types
        other_vm = re.search(r'\(VM\: name=(.+?), uuid=(.+?)\)', line)
        if other_vm is None:
            other_vm = re.search(r'vm=(.+?), uuid=(.+?)\,', line)
        if other_vm is not None:
            if (other_vm.group(1) not in vms.keys()):
                vms[other_vm.group(1)] = {'id': set(), 'hostids': set()}
            vms[other_vm.group(1)]['id'].add(other_vm.group(2))
    bar.finish()
    return vms, hosts, real_firstpos, real_lastpos
def vdsm_vm_host(f, filename, pos, tz_info, vms, hosts, time_range_info):
    """Scan a vdsm log for VM/host identity information.

    Like :func:`libvirtd_vm_host` but vdsm-specific: the current host is
    identified from an "I am the actual vdsm ..." line, after which any
    <name>/<uuid> pair or "vmId='...' 'vmName': '...'" record is linked
    to it.  ``vms`` and ``hosts`` are mutated in place.

    Returns (vms, hosts, real_firstpos, real_lastpos) — the offsets of
    the first line inside the requested time range and the first line
    past it.
    """
    cur = {}
    f.seek(0, os.SEEK_END)
    file_len = f.tell()
    this_host = ''
    multiline = False
    f.seek(pos[0], os.SEEK_SET)
    # Advance to the first timestamped line at or after the range start.
    dt = 0
    while dt < time_range_info[0]:
        dt = 0
        while dt == 0:
            real_firstpos = f.tell()
            dt = parse_date_time(f.readline(), tz_info)
    f.seek(0, os.SEEK_SET)
    widget_style = [filename + ':', progressbar.Percentage(), ' (',
                    progressbar.SimpleProgress(), ')', ' ',
                    progressbar.Bar(), ' ', progressbar.Timer()]
    bar = ProgressBar(widgets=widget_style, max_value=file_len,
                      redirect_stdout=True)
    i = 0
    real_lastpos = file_len
    bar.update(i)
    for line_num, line in enumerate(f):
        i += len(line)
        bar.update(i)
        dt = parse_date_time(line, tz_info)
        if dt == 0:
            continue
        # Remember where the range ends; keep scanning for identities.
        if dt >= time_range_info[1] and real_lastpos == file_len:
            real_lastpos = i + real_firstpos
            # break
        vdsm_host = re.search(r'I am the actual vdsm ' +
                              r'([^\ ]+)\ +([^\ ]+)', line)
        if vdsm_host is not None:
            # This log's own host announces itself once per start-up.
            this_host = vdsm_host.group(2)
            if (this_host not in hosts.keys()):
                hosts[this_host] = {'id': set(), 'vmids': set()}
        vm_name = re.search(r'\<name\>(.+?)\<\/name\>', line)
        if (not multiline and vm_name is not None):
            multiline = True
            cur['vm_name'] = vm_name.group(1)
            continue
        vm_id = re.search(r'\<uuid\>(.+?)\<\/uuid\>', line)
        if (multiline and vm_id is not None):
            cur['vm_id'] = vm_id.group(1)
            if (cur['vm_name'] not in vms.keys()):
                vms[cur['vm_name']] = {'id': set(), 'hostids': set()}
            vms[cur['vm_name']]['id'].add(cur['vm_id'])
            if this_host != '':
                vms[cur['vm_name']]['hostids'].add(this_host)
                hosts[this_host]['vmids'].add(cur['vm_id'])
            multiline = False
            cur = {}
            continue
        other_vm = re.search(
            r'vmId=\'(.+?)\'.+\'vmName\':\ *[u]*\'(.+?)\'', line)
        if other_vm is not None:
            if (other_vm.group(2) not in vms.keys()):
                vms[other_vm.group(2)] = {'id': set(), 'hostids': set()}
            vms[other_vm.group(2)]['id'].add(other_vm.group(1))
            if this_host != '':
                vms[other_vm.group(2)]['hostids'].add(this_host)
                hosts[this_host]['vmids'].add(other_vm.group(1))
    bar.finish()
    return vms, hosts, real_firstpos, real_lastpos
def engine_vm_host(f, filename, pos, tz_info, vms, hosts, time_range_info):
    """Scan an engine log for VM/host identity information.

    Tries a cascade of regex patterns (vmId=..., 'vmId':..., vmName=...,
    VM [name], [name=VM_NAME], HostName=..., hostId=...) to extract VM
    and host names/ids from each line inside the time range.  ``vms``
    and ``hosts`` are mutated in place; entries under the empty-string
    key collect ids whose names were never seen.

    Returns (vms, unknown_vmnames, hosts, real_firstpos, real_lastpos);
    ``unknown_vmnames`` lists [vm_id, host_name] pairs found without a
    VM name so the caller can resolve them later.
    """
    f.seek(0, os.SEEK_END)
    file_len = f.tell()
    f.seek(pos[0], os.SEEK_SET)
    # Advance to the first timestamped line at or after the range start.
    dt = 0
    while dt < time_range_info[0]:
        dt = 0
        while dt == 0:
            real_firstpos = f.tell()
            dt = parse_date_time(f.readline(), tz_info)
    f.seek(0, os.SEEK_SET)
    widget_style = [filename + ':', progressbar.Percentage(), ' (',
                    progressbar.SimpleProgress(), ')', ' ',
                    progressbar.Bar(), ' ', progressbar.Timer()]
    bar = ProgressBar(widgets=widget_style, max_value=file_len)
    i = 0
    real_lastpos = file_len
    bar.update(i)
    unknown_vmnames = []
    for line_num, line in enumerate(f):
        i += len(line)
        bar.update(i)
        dt = parse_date_time(line, tz_info)
        if dt == 0:
            continue
        vm_name = ''
        vm_id = ''
        host_name = ''
        host_id = ''
        # Remember where the range ends; keep scanning for identities.
        if dt >= time_range_info[1] and real_lastpos == file_len:
            real_lastpos = i + real_firstpos
            # break
        # Cheap substring screen before running the regex cascade.
        if any([v in line.lower() for v in ['vmid', 'vmname', 'vm_name']]):
            if (re.search(r"vmId=\'(.+?)\'", line) is not None):
                vm_id = re.search(r"vmId=\'(.+?)\'", line).group(1)
            elif (re.search(r"\'vmId\'\ *[:=]\ *u*\'(.+?)\'",
                            line) is not None):
                vm_id = re.search(
                    r"\'vmId\'\ *[:=]\ *u*\'(.+?)\'", line).group(1)
            else:
                vm_id = ''
            if (re.search(r"vmName\ *=\ *(.+?),", line) is not None):
                vm_name = re.sub('[\'\"]', '',
                                 re.search(r"vmName\ *=\ *(.+?),",
                                           line).group(1))
            elif (re.search(r"vm\ *=\ *\'VM\ *\[([^\[\]]*?)\]\'",
                            line) is not None):
                vm_name = re.sub('[\'\"]', '',
                                 re.search(r"vm\ *=\ *\'VM\ *" +
                                           r"\[([^\[\]]*?)\]\'",
                                           line).group(1))
            elif (re.search(r"\[(.+?)=VM_NAME\]", line) is not None):
                vm_name = re.sub('[\'\"]', '',
                                 re.search(r"\[([^\[\]]*?)=VM_NAME\]",
                                           line).group(1))
            elif (re.search(r"\[(.+?)=VM\]", line) is not None):
                vm_name = re.sub('[\'\"]', '',
                                 re.search(r"\[([^\[\]]*?)=VM\]",
                                           line).group(1))
            elif (re.search(r"\'vmName\'\ *[:=]\ *u*\'([^\']*?)\'",
                            line) is not None):
                vm_name = re.sub('[\'\"]', '',
                                 re.search(r"\'vmName\'" +
                                           r"\ *[:=]\ *u*\'([^\']*?)\'",
                                           line).group(1))
            else:
                vm_name = ''
            if (vm_name == '' and vm_id == ''):
                # Fallback: "VM '<uuid>' (<name>)" — uuid length 30-40.
                other_vm = re.search(r'VM\ *\'(.{30,40}?)\'\ *' +
                                     r'\(([^\(\)]*?)\)', line)
                if (other_vm is not None):
                    vm_name = other_vm.group(2)
                    vm_id = other_vm.group(1)
            # NOTE(review): host extraction only runs on lines that also
            # mention a VM (nested inside the vmid screen) — confirm.
            if (any([l in line.lower()
                     for l in ['hostid', 'hostname']])):
                host_name = re.search(r"HostName\ *=\ *(.+?),", line)
                if host_name is not None:
                    host_name = host_name.group(1)
                else:
                    host_name = ''
                host_id = re.search(r"hostId=\'(.+?)\'", line)
                if host_id is not None:
                    host_id = host_id.group(1)
                else:
                    host_id = ''
            # Literal "null" values are treated as absent.
            if vm_name.lower() == 'null':
                vm_name = ''
            if vm_id.lower() == 'null':
                vm_id = ''
            if host_id.lower() == 'null':
                host_id = ''
            if host_name.lower() == 'null':
                host_name = ''
        if vm_name not in vms.keys():
            vms[vm_name] = {'id': set(), 'hostids': set()}
        if vm_name == '' and vm_id != '' and host_name != '':
            unknown_vmnames += [[vm_id, host_name]]
        vms[vm_name]['id'].add(vm_id)
        vms[vm_name]['hostids'].add(host_name)
        if host_name not in hosts.keys():
            hosts[host_name] = {'id': set(), 'vmids': set()}
        hosts[host_name]['id'].add(host_id)
        hosts[host_name]['vmids'].add(vm_id)
    bar.finish()
    return vms, unknown_vmnames, hosts, real_firstpos, real_lastpos
def timeline_for_engine_vm(output_directory, log_directory, f, filename,
                           output_descriptor, tz_info, vms, hosts):
    """Build per-VM, per-host lifecycle event lists from an engine log.

    Scans ``f`` for start / migration started / migration completed /
    suspend / down messages and records (timestamp, action) tuples in
    ``all_vms[vm_name][host_name]``.  Destination host names in the log
    are matched against known ``hosts`` keys by substring.  The events
    are then collapsed into time ranges via
    :func:`create_time_ranges_for_vms` and dumped to
    ``<filename>_VMs_timeline.json`` in ``output_directory``.

    Returns the collapsed timeline dict (or an empty dict when nothing
    was found).
    """
    all_vms = {}
    # Pre-seed with every vm/host pair already known from identity scans.
    for vm in vms.keys():
        all_vms[vm] = {}
        for host in vms[vm]['hostids']:
            all_vms[vm][host] = []
    for line_num, line in enumerate(f):
        dt = parse_date_time(line, tz_info)
        if dt == 0:
            continue
        vm_start = re.search(r'(VM|Guest)\ +([^\ ]+)\ +' +
                             r'(started|was restarted)\ +on\ +[Hh]ost\ +' +
                             r'([^\ ]+?)([\ +\,]+|$)',
                             line)
        if vm_start is not None:
            this_host = ''
            if vm_start.group(2) not in all_vms.keys():
                all_vms[vm_start.group(2)] = {}
            # Resolve the logged host token to a known host name.
            for hostname in hosts.keys():
                if hostname in vm_start.group(4):
                    this_host = hostname
                    break
            if this_host == '':
                continue
            if (this_host not in
                    all_vms[vm_start.group(2)].keys()):
                all_vms[vm_start.group(2)][this_host] = []
            all_vms[vm_start.group(2)][this_host] += [(dt, 'start')]
        # migration
        migration_start = re.search(r'[Mm]igration\ +started' +
                                    r'\ +\(VM\:\ +([^\ ]+),\ +[Ss]ource\:' +
                                    r'\ +([^\ ]+)\,\ +[Dd]estination\:\ +' +
                                    r'([^\ ]+?)[\ +\,]+',
                                    line)
        if migration_start is not None:
            this_host = ''
            if migration_start.group(1) not in all_vms.keys():
                all_vms[migration_start.group(1)] = {}
            # Record the source side unconditionally...
            if (migration_start.group(2) not in
                    all_vms[migration_start.group(1)].keys()):
                all_vms[migration_start.group(1)][
                    migration_start.group(2)] = []
            all_vms[migration_start.group(1)][
                migration_start.group(2)] += [(dt, 'migrating_from')]
            # ...and the destination only if it maps to a known host.
            for hostname in hosts.keys():
                if hostname in migration_start.group(3):
                    this_host = hostname
                    break
            if this_host == '':
                continue
            if (this_host not in all_vms[migration_start.group(1)].keys()):
                all_vms[migration_start.group(1)][this_host] = []
            all_vms[migration_start.group(1)][
                this_host] += [(dt, 'migrating_to')]
        # migration completed
        migration_end = re.search(r'[Mm]igration\ +completed' +
                                  r'\ +\(VM\:\ +([^\ ]+),\ +[Ss]ource\:' +
                                  r'\ +([^\ ]+)\,\ +[Dd]estination\:\ +' +
                                  r'([^\ ]+?)[\ +\,]+',
                                  line)
        if migration_end is not None:
            this_host = ''
            if migration_end.group(1) not in all_vms.keys():
                all_vms[migration_end.group(1)] = {}
            if (migration_end.group(2) not in
                    all_vms[migration_end.group(1)].keys()):
                all_vms[migration_end.group(1)][migration_end.group(2)] = []
            all_vms[migration_end.group(1)][
                migration_end.group(2)] += [(dt, 'migrated_from')]
            for hostname in hosts.keys():
                if hostname in migration_end.group(3):
                    this_host = hostname
                    break
            if this_host == '':
                continue
            if (this_host not in all_vms[migration_end.group(1)].keys()):
                all_vms[migration_end.group(1)][this_host] = []
            all_vms[migration_end.group(1)][this_host] += [(dt, 'migrated_to')]
        # suspend
        vm_suspend = re.search(r'VM\ +([^\ ]+)\ +' +
                               r'on\ +[Hh]ost\ +([^\ ]+)[\ +\,]+is suspended',
                               line)
        if vm_suspend is not None:
            this_host = ''
            if vm_suspend.group(1) not in all_vms.keys():
                all_vms[vm_suspend.group(1)] = {}
            for hostname in hosts.keys():
                if hostname in vm_suspend.group(2):
                    this_host = hostname
                    break
            if this_host == '':
                continue
            if (this_host not in
                    all_vms[vm_suspend.group(1)].keys()):
                all_vms[vm_suspend.group(1)][this_host] = []
            all_vms[vm_suspend.group(1)][this_host] += [(dt, 'suspend')]
        # down
        vm_down = re.search(r'VM\ +([^\ ]+)\ +is [Dd]own',
                            line)
        if vm_down is not None:
            if vm_down.group(1) not in all_vms.keys():
                all_vms[vm_down.group(1)] = {}
            # "is Down" lines carry no host, so mark every known host.
            for host in all_vms[vm_down.group(1)].keys():
                all_vms[vm_down.group(1)][host] += [(dt, 'down')]
    if all_vms != {}:
        all_vms = create_time_ranges_for_vms(all_vms)
    json.dump(all_vms, open(os.path.join(output_directory,
                                         filename +
                                         '_VMs_timeline.json'),
                            'w'), indent=4, sort_keys=True)
    return all_vms
def create_time_ranges_for_vms(vms):
    """Collapse per-host VM lifecycle events into [start, end] intervals.

    ``vms`` maps vm_name -> host_name -> list of (timestamp, action)
    tuples.  Actions 'start'/'migrating_to'/'migrated_to' open an
    interval; 'down'/'migrated_from'/'suspend' close it.  An interval
    still open after the last event is closed with a synthetic end of
    ``start * 2`` (far in the future for epoch timestamps).  Hosts that
    yield no interval at all are omitted from the result.

    Returns a new dict: vm_name -> host_name -> list of [start, end].
    """
    opening = ('start', 'migrating_to', 'migrated_to')
    closing = ('down', 'migrated_from', 'suspend')
    collapsed = {}
    for vm_name, per_host in vms.items():
        collapsed[vm_name] = {}
        for host_name, events in per_host.items():
            intervals = []
            pending = {}
            for when, action in events:
                if action in opening:
                    # A second opening event while one pends is ignored.
                    if not pending:
                        pending['start'] = when
                elif action in closing:
                    # Close only when exactly the start entry pends.
                    if len(pending) == 1:
                        pending['end'] = when
                if 'start' in pending and 'end' in pending:
                    intervals.append([pending['start'], pending['end']])
                    pending = {}
            if 'start' in pending and 'end' not in pending:
                # Open-ended range: synthesize a far-future end marker.
                intervals.append([pending['start'], pending['start'] * 2])
            if intervals:
                collapsed[vm_name][host_name] = intervals
    return collapsed
def find_all_vm_host(positions,
                     output_descriptor,
                     output_directory,
                     log_directory,
                     files,
                     tz_info,
                     time_range_info):
    """Aggregate VM/host identity data from every log file.

    Dispatches each file to the vdsm/libvirtd/engine scanner based on
    its name, merges the resulting ``vms`` and ``hosts`` maps, prunes
    empty-string placeholder keys, and builds engine-based VM timelines.

    Returns (vms, hosts, not_running_vms, not_found_vmnames,
    not_found_hostnames, first_lines, vms_timeline) where
    ``first_lines[log]`` holds [firstpos, lastpos] byte offsets per time
    range and the "not_found" lists hold ids whose names never appeared.
    """
    vms = {}
    hosts = {}
    # list of number of first lines for the time range to pass others
    first_lines = {}
    unknown_vmnames = []
    for log_idx, log in enumerate(files):
        f = open_log_file(log)
        if f is None:
            output_descriptor.write("Unknown file extension: %s" % log)
            continue
        first_lines[log] = []
        for tr_idx, log_position in enumerate(positions[log]):
            # Choose the parser from the file name.
            if 'vdsm' in log.lower():
                vms, hosts, firstline_pos, lastline_pos = \
                    vdsm_vm_host(f, log, log_position,
                                 tz_info[log_idx], vms, hosts,
                                 time_range_info[tr_idx])
            elif 'libvirt' in log.lower():
                vms, hosts, firstline_pos, lastline_pos = \
                    libvirtd_vm_host(f, log, log_position,
                                     tz_info[log_idx], vms, hosts,
                                     time_range_info[tr_idx])
            else:
                vms, unknown_vmnames, hosts, firstline_pos, lastline_pos = \
                    engine_vm_host(f, log, log_position,
                                   tz_info[log_idx],
                                   vms, hosts,
                                   time_range_info[tr_idx])
            first_lines[log] += [[firstline_pos, lastline_pos]]
        f.close()
    # VMs with no id at all are considered never running.
    not_running_vms = []
    for k in sorted(vms.keys()):
        if '' in vms[k]['id']:
            vms[k]['id'].remove('')
        if (len(vms[k]['id']) == 0):
            not_running_vms += [k]
            vms.pop(k)
    not_found_vmnames = []
    for k in sorted(vms.keys()):
        if ('' in vms[k]['hostids']):
            vms[k]['hostids'].remove('')
        # NOTE(review): entries in not_running_vms were popped from vms
        # above, so this re-check can never match — confirm intent.
        if k in not_running_vms and len(vms[k]['id']) > 0:
            not_running_vms.remove(k)
        if k == '':
            # The empty-name bucket holds ids with no known name.
            not_found_vmnames = list(vms[k]['id'])
            if '' in not_found_vmnames:
                not_found_vmnames.remove('')
            vms.pop(k)
            continue
        if not_found_vmnames != []:
            # Drop ids that turned out to belong to a named VM.
            for v in not_found_vmnames.copy():
                if v in list(vms[k]['id']):
                    not_found_vmnames.remove(v)
        for vm_idx in unknown_vmnames:
            if vm_idx[0] in vms[k]['id']:
                vms[k]['hostids'].add(vm_idx[1])
    not_found_hostnames = []
    for k in sorted(hosts.keys()):
        if k == '':
            not_found_hostnames = list(hosts[k]['id'])
            if '' in not_found_hostnames:
                not_found_hostnames.remove('')
            hosts.pop(k)
            continue
        if not_found_hostnames != []:
            for i in not_found_hostnames.copy():
                if i in list(hosts[k]['id']):
                    not_found_hostnames.remove(i)
        if ('' in hosts[k]['id']):
            hosts[k]['id'].remove('')
        if ('' in hosts[k]['vmids']):
            hosts[k]['vmids'].remove('')
    # Timelines are built from engine logs only.
    vms_timeline = {}
    for log_idx, log in enumerate(files):
        if 'engine' not in log:
            continue
        f = open_log_file(log)
        if f is None:
            output_descriptor.write("Unknown file extension: %s" % log)
            continue
        for tr_idx, log_position in enumerate(positions[log]):
            cur_timeline = timeline_for_engine_vm(output_directory,
                                                  log_directory, f, log,
                                                  output_descriptor,
                                                  tz_info[log_idx], vms,
                                                  hosts)
            vms_timeline.update(cur_timeline)
    return vms, hosts, not_running_vms, not_found_vmnames, \
        not_found_hostnames, first_lines, vms_timeline
def find_vm_tasks_engine(positions, output_descriptor, log_directory,
                         log, file_formats, tz_info, time_range_info,
                         output_directory, needed_linenum, reasons,
                         criterias):
    """Extract engine command/task flows (start, finish, nesting) from a log.

    Matches "Running command", "START,", "FINISH,", "Ending command",
    "Adding CommandMultiAsyncTasks", "Attaching task", "Adding task",
    "waiting on child" and "Removed task ... from DataBase" messages,
    accumulating per-thread command records (with start/finish times and
    durations) plus a command/task parent-child graph.  The per-thread
    map is dumped to ``<dir>_engine_commands_by_id.json``; the graph is
    linked into trees via :func:`link_commands`, and long operations are
    detected when requested via ``criterias``.

    Returns (commands_threads, long_actions, new_commands, command_lvl,
    needed_linenum, reasons).
    """
    commands_threads = {}
    long_actions = []
    tasks = {}
    commands = {}
    f = open_log_file(log)
    if f is None:
        output_descriptor.write("Unknown file extension: %s" % log)
        return commands_threads, long_actions, {}, {}, needed_linenum, reasons
    # Detect the line format by probing the first line against each
    # candidate regex.
    firstline = f.readline()
    for fmt in file_formats:
        prog = re.compile(fmt)
        fields = prog.search(firstline)
        if fields is not None:
            file_format = prog
            break
    if fields is None:
        # Format is not found
        return commands_threads, long_actions, {}, {}, needed_linenum, reasons
    f.seek(0, os.SEEK_END)
    widget_style = [log + ':', progressbar.Percentage(), ' (',
                    progressbar.SimpleProgress(), ')', ' ',
                    progressbar.Bar(), ' ', progressbar.Timer()]
    bar = ProgressBar(widgets=widget_style, max_value=max(
        [p[i] for p in positions for i in range(len(p))]))
    for tr_idx, pos in enumerate(positions):
        f.seek(pos[0], os.SEEK_SET)
        i = pos[0]
        bar.update(i)
        for line_num, line in enumerate(f):
            i += len(line)
            bar.update(i)
            fields = file_format.search(line)
            if fields is None:
                # Tracebacks will be added anyway
                continue
            fields = fields.groupdict()
            dt = parse_date_time(line, tz_info)
            if dt == 0:
                continue
            if (dt > time_range_info[tr_idx][1]):
                break
            # "(thread) [flow] Running command: XxxCommand" — records the
            # command's initiation on its thread.
            command = re.search(r"\((.+?)\)\ +\[(.*?)\]\ +" +
                                r"[Rr]unning [Cc]ommand:\ +" +
                                r"([^\s]+)[Cc]ommand", line)
            if (command is not None):
                if (command.group(1) not in commands_threads.keys()):
                    commands_threads[command.group(1)] = []
                commands_threads[command.group(1)] += [
                    {'command_name': command.group(3),
                     'command_start_name': command.group(3),
                     'init_time': dt,
                     'log': log,
                     'init_line_num': line_num + 1,
                     'flow_id': command.group(2),
                     'thread': command.group(1)}]
                continue
            # "START, XxxCommand ... log id: N" — marks execution start.
            start = re.search(r"\((.+?)\)\ +\[(.*?)\]\ +" +
                              r"[Ss][Tt][Aa][Rr][Tt],\ +" +
                              r"([^\s]+)Command.*\ +log id:\ (.+)", line)
            if (start is not None):
                if (start.group(1) not in commands_threads.keys()):
                    commands_threads[start.group(1)] = [
                        {'command_name': start.group(3),
                         'command_start_name': start.group(3),
                         'start_time': dt,
                         'log': log,
                         'log_id': start.group(4),
                         'flow_id': start.group(2),
                         'thread': start.group(1),
                         'start_line_num': line_num + 1}]
                else:
                    # Attach to the most recent matching entry on this
                    # thread.
                    flow_list = [com['flow_id'] for com in
                                 commands_threads[start.group(1)]]
                    try:
                        # NOTE(review): searches flow_list (flow ids) for
                        # start.group(1), which is the THREAD name — was
                        # start.group(2) intended?  A miss is masked by
                        # the ValueError fallback below; confirm.
                        com_id = len(flow_list) - 1 - \
                            flow_list[::-1].index(start.group(1))
                        commands_threads[start.group(1)][
                            com_id]['command_start_name'] = start.group(3)
                        commands_threads[start.group(1)][
                            com_id]['start_time'] = dt
                        commands_threads[start.group(1)][
                            com_id]['log_id'] = start.group(4)
                        commands_threads[start.group(1)][
                            com_id]['start_line_num'] = line_num + 1
                    except (KeyError, ValueError):
                        commands_threads[start.group(1)] += [
                            {'command_name': start.group(3),
                             'command_start_name': start.group(3),
                             'start_time': dt,
                             'log': log,
                             'log_id': start.group(4),
                             'flow_id': start.group(2),
                             'thread': start.group(1),
                             'start_line_num': line_num + 1}]
                continue
            # "FINISH, XxxCommand ... log id: N" — matched by log id.
            finish = re.search(r"\((.+?)\)\ +\[(.*?)\]\ +" +
                               r"[Ff][Ii][Nn][Ii][Ss][Hh],\ +" +
                               r"([^\s]+)Command.*\ +log id:\ (.+)", line)
            if (finish is not None):
                if (finish.group(1) not in commands_threads.keys()):
                    continue
                for task_idx, command in \
                        enumerate(commands_threads[finish.group(1)]):
                    if ('log_id' in command.keys() and
                            command['log_id'] == finish.group(4)):
                        commands_threads[finish.group(1)][task_idx][
                            'finish_time'] = dt
                        commands_threads[finish.group(1)][task_idx][
                            'finish_line_num'] = line_num + 1
                        if ('start_time' in commands_threads[
                                finish.group(1)][task_idx].keys()):
                            commands_threads[finish.group(1)][
                                task_idx]['duration'] = dt - \
                                commands_threads[finish.group(1)][
                                    task_idx]['start_time']
                        break
                continue
            # "Ending command '...XxxCommand' successfully" — closes the
            # most recent unfinished command with that name, searched
            # across all threads ordered by init time.
            ending = re.search(r"\((.+?)\)\ +\[(.*?)\]\ +" +
                               r"[Ee]nding\ +[Cc]ommand\ *" +
                               r"\'.+\.(.+)Command\'\ *successfully", line)
            if (ending is not None):
                if (ending.group(1) not in commands_threads.keys()):
                    continue
                else:
                    thr_list = [(com['thread'], idx, com['init_time'])
                                for thr in
                                commands_threads.keys()
                                for idx, com in
                                enumerate(commands_threads[thr])
                                if 'init_time' in com.keys()
                                and 'duration_full' not in com.keys()]
                    com_list = [com['command_name'] for thr in
                                commands_threads.keys()
                                for com in commands_threads[thr]
                                if 'init_time' in com.keys()
                                and 'duration_full' not in com.keys()]
                    to_sort_idx = sorted(range(len(thr_list)),
                                         key=lambda k: thr_list[k][2])
                    thr_list = [thr_list[idx] for idx in to_sort_idx]
                    com_list = [com_list[idx] for idx in to_sort_idx]
                    try:
                        # NOTE(review): uses len-1 minus the FIRST
                        # occurrence index (cf. the [::-1].index pattern
                        # above) — verify this selects the intended
                        # (latest) entry when duplicates exist.
                        com_id = len(com_list) - 1 - \
                            com_list.index(ending.group(3))
                    except ValueError:
                        continue
                    commands_threads[thr_list[com_id][0]][
                        thr_list[com_id][1]]['end_time'] = dt
                    commands_threads[thr_list[com_id][0]][
                        thr_list[com_id][1]]['end_line_num'] = line_num + 1
                    commands_threads[thr_list[com_id][0]][
                        thr_list[com_id][1]]['duration_full'] = dt - \
                        commands_threads[thr_list[com_id][0]][thr_list[
                            com_id][1]]['init_time']
                    continue
            # "Adding CommandMultiAsyncTasks object for command '<id>'".
            multiasync = re.search(r"\((.+?)\)\ +\[(.*?)\].+" +
                                   r"[Aa]dding\ +CommandMultiAsyncTasks\ +" +
                                   r"[Oo]bject\ +[Ff]or\ +[Cc]ommand\ +" +
                                   r"\'(.+?)\'", line)
            if multiasync is not None:
                commands[multiasync.group(3)] = {'name': commands_threads[
                    multiasync.group(1)][-1]['command_name'],
                    'thread': multiasync.group(1),
                    'flow_id': multiasync.group(2),
                    'log': log,
                    'first_line_num': line_num + 1}
                continue
            # "Attaching task '<tid>' to command '<cid>'".
            subtask_init = re.search(r"\((.+?)\)\ +\[(.*?)\].+" +
                                     r"[Aa]ttaching [Tt]ask\ +\'(.+?)\'\ +" +
                                     r"[Tt]o [Cc]ommand\ +\'(.+?)\'",
                                     line)
            if subtask_init is not None:
                if (subtask_init.group(4) not in commands.keys()):
                    commands[subtask_init.group(4)] = {
                        'thread': subtask_init.group(1),
                        'flow_id': subtask_init.group(2),
                        'log': log,
                        'first_line_num': line_num + 1}
                tasks[subtask_init.group(3)] = {
                    'thread': subtask_init.group(1),
                    'parent_id': subtask_init.group(4),
                    'flow_id': subtask_init.group(2),
                    'log': log,
                    'first_line_num': line_num + 1}
                continue
            # start
            subtask_start = re.search(r"\((.+?)\)\ +\[(.*?)\].+" +
                                      r"[Aa]dding [Tt]ask\ +\'(.+?)\'\ +" +
                                      r"\(*[Pp]arent [Cc]ommand\ +\'(.+?)\'" +
                                      r".*\)",
                                      line)
            if subtask_start is not None:
                if (subtask_start.group(3) not in tasks.keys()):
                    tasks[subtask_start.group(3)] = {
                        'parent_name': subtask_start.group(4),
                        'thread': subtask_start.group(1),
                        'start_time': dt,
                        'flow_id': subtask_start.group(2),
                        'log': log,
                        'first_line_num': line_num + 1}
                    continue
                tasks[subtask_start.group(3)]['parent_name'] = \
                    subtask_start.group(4)
                tasks[subtask_start.group(3)]['start_time'] = dt
                continue
            # wait
            subtask_wait = re.search(r"\((.+?)\)\ +\[(.*?)\].+" +
                                     r"[Cc]ommand\ +\'(.+?)\'\ +\([IDid]+\:" +
                                     r"\ +" +
                                     r"\'(.+?)\'\)\ +[Ww]aiting [Oo]n\ +" +
                                     r"[Cc]hild.+[IDid]\:\ +" +
                                     r"\'(.+?)\'\ +[Tt]ype\:\ *\'(.+?)\'",
                                     line)
            if subtask_wait is not None:
                if (subtask_wait.group(4) not in commands.keys()):
                    commands[subtask_wait.group(4)] = {
                        'thread': subtask_wait.group(1),
                        'flow_id': subtask_wait.group(2),
                        'log': log,
                        'first_line_num': line_num + 1}
                commands[subtask_wait.group(4)]['name'] = subtask_wait.group(3)
                # A known child whose name changed is reset to a stub.
                if (subtask_wait.group(5) in commands.keys()
                        and commands[subtask_wait.group(5)]['name'] !=
                        subtask_wait.group(6)):
                    commands[subtask_wait.group(5)] = {
                        'name': subtask_wait.group(6),
                        'thread': 'n/a',
                        'flow_id': 'n/a',
                        'log': log,
                        'first_line_num': 'n/a'}
                if 'childs' not in commands[subtask_wait.group(4)].keys():
                    commands[subtask_wait.group(4)]['childs'] = [
                        {'child_id': subtask_wait.group(5),
                         'child_name': subtask_wait.group(6)}]
                    continue
                if (subtask_wait.group(5) not in
                        [child['child_id'] for child in
                         commands[subtask_wait.group(4)]['childs']]):
                    commands[subtask_wait.group(4)]['childs'] += [
                        {'child_id': subtask_wait.group(5),
                         'child_name': subtask_wait.group(6)}]
                continue
            # end
            subtask_end = re.search(r"\((.+?)\)\ +\[(.*?)\].+" +
                                    r"[Rr]emoved [Tt]ask\ +\'(.+?)\'\ +" +
                                    r"[Ff]rom [Dd]ata[Bb]ase", line)
            if subtask_end is not None:
                if (subtask_end.group(3) not in tasks.keys()):
                    continue
                tasks[subtask_end.group(3)]['end_time'] = dt
                tasks[subtask_end.group(3)]['end_line_num'] = line_num + 1
                if ('start_time' in tasks[subtask_end.group(3)].keys()):
                    tasks[subtask_end.group(3)]['duration'] = dt - \
                        tasks[subtask_end.group(3)]['start_time']
                continue
    f.close()
    bar.finish()
    # Fold every task under its parent command as a 'ztasks' entry.
    for com in sorted(commands.keys()):
        for task_id in sorted(tasks.keys()):
            if 'parent_id' not in tasks[task_id].keys():
                tasks.pop(task_id)
                continue
            if (tasks[task_id]['parent_id'] == com):
                # NOTE(review): the guard checks key 'tasks' but the list
                # is stored under 'ztasks' — the condition is therefore
                # always true and re-creates the list, keeping only the
                # last task per command.  Probable typo; confirm.
                if 'tasks' not in commands[com].keys():
                    commands[com]['ztasks'] = []
                commands[com]['ztasks'] += [tasks[task_id]]
                commands[com]['ztasks'][-1]['id'] = task_id
                tasks.pop(task_id)
    json.dump(commands_threads, open(os.path.join(output_directory,
                                                  log_directory.split('/')[-2] +
                                                  '_engine_commands_by_id.json'),
                                     'w'), indent=4, sort_keys=True)
    new_commands, command_lvl = link_commands(log_directory.split('/')[-2],
                                              output_descriptor,
                                              commands,
                                              output_directory)
    if commands_threads != {} and 'Long operations' in criterias:
        long_actions, needed_linenum, reasons = find_long_operations(
            commands_threads,
            needed_linenum,
            reasons)
    return commands_threads, long_actions, new_commands, command_lvl, \
        needed_linenum, reasons
def link_commands(log_dir, output_descriptor, commands,
                  output_directory):
    """Link a flat command map into nested parent/child trees.

    Consumes ``commands`` destructively: leaves (no 'childs') are merged
    into their parent's 'zchildren' list, repeating until only roots
    remain; commands with no known parent are kept at top level.  The
    resulting forest is renumbered via :func:`change_lvl_numbering` and
    dumped to ``<log_dir>_commands.json`` in ``output_directory``.

    Returns (new_commands, command_lvl) — the forest and a flat
    {command_id: nesting_level} lookup.
    """
    without_parents = []
    new_commands = {}
    # Seed new_commands with leaves, dropping references to unknown
    # children first.
    for command_id in sorted(commands.keys()):
        if 'childs' not in commands[command_id].keys():
            new_commands[command_id] = commands[command_id]
            new_commands[command_id]['lvl'] = 1
        else:
            # NOTE(review): removing from the list being iterated can
            # skip the element after each removal — confirm acceptable.
            for child in commands[command_id]['childs']:
                if child['child_id'] not in commands.keys():
                    commands[command_id]['childs'].remove(child)
            if len(commands[command_id]['childs']) == 0:
                commands[command_id].pop('childs', None)
                new_commands[command_id] = commands[command_id]
                new_commands[command_id]['lvl'] = 1
    # Repeatedly fold leaves into their parents (one leaf per pass,
    # hence the break after each mutation).
    leafs = True
    while leafs:
        leafs = False
        for idx, command_id in enumerate(sorted(commands.keys())):
            if 'childs' not in commands[command_id].keys():
                leafs = True
                parent = find_parent(output_descriptor, commands, command_id)
                if parent is None:
                    without_parents += [commands[command_id]]
                    without_parents[-1]['id'] = command_id
                    commands.pop(command_id)
                    new_commands.pop(command_id)
                else:
                    commands.pop(command_id)
                    new_commands[parent] = {
                        'name': commands[parent]['name'],
                        'thread': commands[parent]['thread'],
                        'flow_id': commands[parent]['flow_id'],
                        'log': commands[parent]['log'],
                        'first_line_num': commands[parent][
                            'first_line_num'],
                        'id': parent,
                        'lvl': 2,
                        'zchildren': []}
                    for child in commands[parent]['childs']:
                        if (child['child_id'] in new_commands.keys()
                                or child['child_id'] in commands.keys()
                                and 'childs' not in
                                commands[child['child_id']].keys()):
                            new_commands[parent]['zchildren'] += \
                                [new_commands[child['child_id']]]
                            new_commands[parent]['zchildren'][-1]['id'] = \
                                child['child_id']
                            if child['child_id'] in commands.keys():
                                commands.pop(child['child_id'])
                            if child['child_id'] in new_commands.keys():
                                new_commands.pop(child['child_id'])
                break
    # Attach remaining multi-level parents until no command is consumed
    # in a full pass.
    heads = []
    com_len = 0
    while(com_len != len(commands)):
        com_len = len(commands)
        for idx, command_id in enumerate(sorted(new_commands.keys())):
            if command_id in heads:
                continue
            parent = find_parent(output_descriptor, commands, command_id)
            if parent is None:
                if command_id in commands.keys():
                    commands.pop(command_id)
                heads += [command_id]
                break
            else:
                new_commands[parent] = {'name': commands[parent]['name'],
                                        'thread': commands[parent]['thread'],
                                        'flow_id': commands[parent]['flow_id'],
                                        'log': commands[parent]['log'],
                                        'first_line_num': commands[parent][
                                            'first_line_num'],
                                        'id': parent,
                                        'zchildren': []}
                for child in commands[parent]['childs']:
                    if (child['child_id'] in new_commands.keys()
                            or child['child_id'] in commands.keys()
                            and 'childs' not in
                            commands[child['child_id']].keys()):
                        new_commands[parent]['zchildren'] += \
                            [new_commands[child['child_id']]]
                        new_commands[parent]['zchildren'][-1]['id'] = \
                            child['child_id']
                        new_commands[parent]['lvl'] = \
                            new_commands[child['child_id']]['lvl'] + 1
                        if child['child_id'] in commands.keys():
                            commands.pop(child['child_id'])
                        if child['child_id'] in new_commands.keys():
                            new_commands.pop(child['child_id'])
                break
    # Orphans go back in at top level.
    for com in without_parents:
        new_commands[com['id']] = com
    new_commands, command_lvl = change_lvl_numbering(new_commands)
    json.dump(new_commands, open(os.path.join(output_directory,
              log_dir + '_commands.json'), 'w'), indent=4, sort_keys=True)
    return new_commands, command_lvl
def change_lvl_numbering(commands):
    """Renumber the nesting level of every command tree from 1 downward.

    Delegates to :func:`change_lvl_numbering_recursive` for each
    top-level command; the shared ``command_lvl`` dict is filled in
    place with a flat {id: lvl} lookup spanning all trees.

    Returns the (mutated) ``commands`` mapping and the lookup table.
    """
    command_lvl = {}
    root_lvl = 1
    for com_id in commands.keys():
        renumbered, _lvls = change_lvl_numbering_recursive(commands[com_id],
                                                           root_lvl,
                                                           command_lvl)
        commands[com_id] = renumbered
    return commands, command_lvl
def change_lvl_numbering_recursive(com, cur_lvl, lvls):
    """Assign ``cur_lvl`` to ``com`` and recurse into its descendants.

    Children under 'zchildren' take precedence; only when that key is
    absent are 'ztasks' entries visited.  ``lvls`` is mutated in place,
    mapping each visited node's 'id' to its level, and is also returned
    alongside the (mutated) node.
    """
    com['lvl'] = cur_lvl
    lvls[com['id']] = cur_lvl
    if 'zchildren' in com:
        for pos, child in enumerate(com['zchildren']):
            updated, sub_lvls = change_lvl_numbering_recursive(child,
                                                               cur_lvl + 1,
                                                               lvls)
            com['zchildren'][pos] = updated
            lvls.update(sub_lvls)
    elif 'ztasks' in com:
        for pos, task in enumerate(com['ztasks']):
            updated, sub_lvls = change_lvl_numbering_recursive(task,
                                                               cur_lvl + 1,
                                                               lvls)
            com['ztasks'][pos] = updated
            lvls.update(sub_lvls)
    return com, lvls
def find_parent(output_descriptor, commands, command_id):
    """Return the id of the first command (in sorted id order) whose
    'childs' list contains ``command_id``, or None when no command
    claims it as a child.  ``output_descriptor`` is unused but kept for
    signature compatibility with callers.
    """
    for candidate in sorted(commands.keys()):
        entry = commands[candidate]
        if 'childs' not in entry:
            continue
        if any(child['child_id'] == command_id for child in entry['childs']):
            return candidate
    return None
def find_vm_tasks_libvirtd(positions, output_descriptor, log_directory,
                           log, file_formats, tz_info, time_range_info,
                           output_directory, needed_linenum, reasons,
                           criterias):
    """Scan a libvirtd log for per-thread job start/finish pairs and for
    qemu monitor request/response exchanges.

    positions -- list of byte-offset pairs, one per time range, used to
        seek into the log file
    output_descriptor -- writable stream for error messages
    log_directory -- path whose second-to-last component names the output
        JSON file
    log -- path of the log file to scan
    file_formats -- candidate regexes; the first one matching the log's
        first line is used to recognize parseable lines
    tz_info, time_range_info -- timezone and (start, end) bounds per range
    criterias -- if it contains 'Long operations', slow monitor exchanges
        and long jobs are flagged into needed_linenum/reasons

    Returns (commands_threads, long_actions, needed_linenum, reasons).
    Side effect: dumps the collected qemu monitor data to
    <output_directory>/<dir>_qemu_libvirt.json.
    """
    commands_threads = {}
    long_actions = []
    qemu_monitor = {}
    f = open_log_file(log)
    if f is None:
        output_descriptor.write("Unknown file extension: %s" % log)
        return commands_threads, long_actions, needed_linenum, reasons
    # Probe the first line to pick the matching log-line format.
    firstline = f.readline()
    for fmt in file_formats:
        prog = re.compile(fmt)
        fields = prog.search(firstline)
        if fields is not None:
            file_format = prog
            break
    # NOTE(review): if file_formats is empty, `fields` (and `file_format`)
    # would be unbound here — presumably callers always pass at least one
    # format; confirm.
    if fields is None:
        # Format is not found
        return commands_threads, long_actions, needed_linenum, reasons
    f.seek(0, os.SEEK_END)
    widget_style = [log + ':', progressbar.Percentage(), ' (',
                    progressbar.SimpleProgress(), ')', ' ',
                    progressbar.Bar(), ' ', progressbar.Timer()]
    bar = ProgressBar(widgets=widget_style, max_value=sum(
        [p[i] for p in positions for i in range(len(p))]))
    for tr_idx, pos in enumerate(positions):
        f.seek(pos[0], os.SEEK_SET)
        i = pos[0]
        bar.update(i)
        # NOTE(review): line_num restarts at 0 for every position chunk, so
        # recorded line numbers are relative to the seek point, not to the
        # whole file — confirm downstream consumers expect this.
        for line_num, line in enumerate(f):
            i += len(line)
            bar.update(i)
            fields = file_format.search(line)
            if fields is None:
                # Tracebacks will be added anyway
                continue
            fields = fields.groupdict()
            dt = parse_date_time(line, tz_info)
            if dt == 0:
                continue
            # Stop this chunk once past the end of its time range.
            if (dt > time_range_info[tr_idx][1]):
                break
            # A thread picked up a job: open a new command record for it.
            start = re.search(r"Thread (.+?) \((.+?)\) is now running " +
                              r"job (.+)", line)
            if (start is not None):
                if (start.group(1) not in commands_threads.keys()):
                    commands_threads[start.group(1)] = []
                commands_threads[start.group(1)] += [
                    {'command_name': start.group(3),
                     'command_start_name': start.group(3),
                     'start_line_num': line_num + 1,
                     'start_time': dt,
                     'log': log}]
                continue
            # A thread finished a job: close the most recent matching record.
            finish = re.search(r"Thread (.+?) \((.+?)\) finished job (.+?)" +
                               r"( .*|$)", line)
            if (finish is not None):
                if (finish.group(1) not in commands_threads.keys()):
                    continue
                else:
                    com_list = [com['command_name'] for com in
                                commands_threads[finish.group(1)]]
                    try:
                        # Last occurrence of this job name on the thread.
                        com_id = len(com_list) - 1 - \
                            com_list[::-1].index(finish.group(3))
                    except ValueError:
                        continue
                commands_threads[finish.group(1)][com_id]['finish_time'] = dt
                commands_threads[finish.group(1)][com_id][
                    'finish_line_num'] = line_num + 1
                if ('start_time' in commands_threads[
                        finish.group(1)][com_id].keys()):
                    commands_threads[finish.group(1)][com_id]['duration'] = \
                        commands_threads[finish.group(1)][
                        com_id]['finish_time'] -\
                        commands_threads[finish.group(1)][
                        com_id]['start_time']
                continue
            # qemu monitor
            # An "execute" message sent to a monitor: remember it by id.
            send_monitor = re.search(r"mon\ *=\ *(.+?)\ +buf\ *\=\ *" +
                                     r"\{\"execute.+\"id\"\:\ *\"(.+?)\"\}",
                                     line)
            if send_monitor is not None:
                if (send_monitor.group(1) not in qemu_monitor.keys()):
                    qemu_monitor[send_monitor.group(1)] = []
                qemu_monitor[send_monitor.group(1)] += [
                    {'send_time': dt,
                     'id': send_monitor.group(2),
                     'start_line_num':
                     str(line_num + 1),
                     'log': log}]
            # A "return" message: pair it with the matching "execute" by id.
            return_monitor = re.search(r"mon\ *=\ *(.+?)\ +buf\ *\=\ *" +
                                       r"\{\"return.+\"id\"\:\ *\"(.+?)\"\}",
                                       line)
            if return_monitor is not None:
                if (return_monitor.group(1) not in qemu_monitor.keys()):
                    continue
                for mes_idx, mes in enumerate(qemu_monitor[
                        return_monitor.group(1)]):
                    if (mes['id'] == return_monitor.group(2)):
                        duration = dt - qemu_monitor[return_monitor.group(1)][
                            mes_idx]['send_time']
                        # Fast exchanges (< 1 unit, presumably seconds —
                        # confirm) are dropped as uninteresting.
                        if (duration < 1):
                            qemu_monitor[return_monitor.group(1)].remove(mes)
                            break
                        qemu_monitor[return_monitor.group(1)][
                            mes_idx]['return_time'] = dt
                        qemu_monitor[return_monitor.group(1)][
                            mes_idx]['finish_line_num'] = str(line_num + 1)
                        qemu_monitor[return_monitor.group(1)][
                            mes_idx]['duration'] = duration
                        # Flag both ends of a slow exchange for the report.
                        if ('Long operations' in criterias):
                            needed_linenum.add(log + ':' + str(line_num + 1))
                            if (log + ':' + str(line_num + 1)
                                    not in reasons.keys()):
                                reasons[log + ':' + str(line_num + 1)] = set()
                            reasons[log + ':' + str(line_num + 1)].add(
                                'Monitor(duration=' + str(duration) + ')')
                            needed_linenum.add(log + ':' + qemu_monitor[
                                return_monitor.group(1)][
                                mes_idx]['start_line_num'])
                            if (log + ':' + qemu_monitor[
                                    return_monitor.group(1)][mes_idx][
                                    'start_line_num'] not in reasons.keys()):
                                reasons[log + ':' + qemu_monitor[
                                    return_monitor.group(1)][
                                    mes_idx][
                                    'start_line_num']] = set()
                            reasons[log + ':' + qemu_monitor[
                                return_monitor.group(1)][
                                mes_idx][
                                'start_line_num']].add(
                                'Monitor(duration=' +
                                str(duration) + ')')
                        break
    f.close()
    bar.finish()
    json.dump(qemu_monitor, open(os.path.join(output_directory,
                                              log_directory.split('/')[-2] +
                                              '_qemu_libvirt.json'),
                                 'w'), indent=4, sort_keys=True)
    if commands_threads != {} and 'Long operations' in criterias:
        long_actions, needed_linenum, reasons = find_long_operations(
            commands_threads,
            needed_linenum,
            reasons)
    return commands_threads, long_actions, needed_linenum, reasons
def _flag_long_commands(grouped, duration_key, time_key, start_key, end_key,
                        long_operations, needed_linenum, reasons):
    """Flag statistically long entries of one command group in place.

    For each command name in ``grouped`` (processed in sorted order to keep
    deterministic output), an entry is "long" when its duration exceeds
    median + 3*std AND 1, or exceeds 5 outright.  Long entries append their
    ``time_key`` timestamp to ``long_operations[command]`` and mark both
    their ``start_key`` and ``end_key`` lines in ``needed_linenum`` and
    ``reasons`` with a 'Task(duration=...)' tag.

    grouped -- {command name: [command dicts]}
    duration_key/time_key/start_key/end_key -- dict keys to read, which
        differ between plain operations ('duration', 'start_time', ...) and
        full operations ('duration_full', 'init_time', ...)
    """
    for command in sorted(grouped.keys()):
        durations = [entry[duration_key] for entry in grouped[command]]
        med_com_time = np.median(durations)
        std_com_time = np.std(durations)
        for entry in grouped[command]:
            dur = entry[duration_key]
            # Outlier (3-sigma above the median, and non-trivial) or simply
            # longer than 5 units in absolute terms.
            if not ((dur > med_com_time + 3 * std_com_time and dur > 1)
                    or dur > 5):
                continue
            long_operations.setdefault(command, []).append(entry[time_key])
            reason = 'Task(duration=' + str(np.round(dur, 2)) + ')'
            for line_key in (start_key, end_key):
                mark = entry['log'] + ':' + str(entry[line_key])
                needed_linenum.add(mark)
                reasons.setdefault(mark, set()).add(reason)


def find_long_operations(all_threads, needed_linenum, reasons):
    """Find abnormally long operations among all parsed thread commands.

    all_threads -- {thread id: [command dicts]}; commands carrying a
        'duration' are grouped by 'command_start_name', commands carrying
        only 'duration_full' are grouped by 'command_name'
    needed_linenum -- set of 'log:line' markers, extended in place
    reasons -- {'log:line': set of tags}, extended in place

    Returns (long_operations, needed_linenum, reasons) where
    ``long_operations`` maps command names to the start/init timestamps of
    their long runs.
    """
    full_operations_time = {}
    operations_time = {}
    long_operations = {}
    for thread in all_threads:
        for command in all_threads[thread]:
            if 'duration' in command.keys():
                operations_time.setdefault(
                    command['command_start_name'], []).append(command)
            elif 'duration_full' in command.keys():
                full_operations_time.setdefault(
                    command['command_name'], []).append(command)
    # Completed operations (start/finish pairs seen in the log).
    _flag_long_commands(operations_time, 'duration', 'start_time',
                        'start_line_num', 'finish_line_num',
                        long_operations, needed_linenum, reasons)
    # "Full" operations (init/end pairs).
    _flag_long_commands(full_operations_time, 'duration_full', 'init_time',
                        'init_line_num', 'end_line_num',
                        long_operations, needed_linenum, reasons)
    return long_operations, needed_linenum, reasons
|
mz-pdm/ovirt-log-analyzer
|
src/lib/detect_running_components.py
|
Python
|
apache-2.0
| 65,702
|
[
"ASE"
] |
fda15b423de0117797b0a3da3a222452e9ed8f532622df52d1c6999bbb4e8090
|
from __future__ import absolute_import
import inspect
import importlib
import logging
import sys
from . import plot
# Python 2/3 string-type shim: code below type-checks against `basestring`,
# which only exists on Python 2 (common base of str and unicode).
try:
    basestring  # For Python 2 compatibility
except NameError:
    basestring = str  # For Python 3 compatibility
class NengoTrial(plot.PlotTrial):
    """Trial base class for experiments that build and run a nengo model.

    Subclasses implement :meth:`model` (and the inherited ``evaluate``);
    this class handles backend selection, neuron-type substitution, and the
    optional nengo GUI front end.
    """
    def _create_base_params(self):
        # Register nengo-specific parameters on top of those of PlotTrial.
        super(NengoTrial, self)._create_base_params()
        self.param('nengo backend to use', backend='nengo')
        self.param('nengo timestep', dt=0.001)
        self.param('run in nengo GUI', gui=False, system=True)
        self.param('enable debug messages', debug=False, system=True)
        self.param('neuron type', neuron_type='default')
    def execute_trial(self, p):
        """Build the model and either launch the GUI or run the simulator.

        Returns the superclass's trial result in the non-GUI path; the GUI
        path blocks in the server and returns None.
        """
        if p.debug:
            logging.basicConfig(level=logging.DEBUG)
        model = self.model(p)
        import nengo
        if not isinstance(model, nengo.Network):
            raise ValueError('model() must return a nengo.Network')
        if p.neuron_type != 'default':
            if isinstance(p.neuron_type, basestring):
                # NOTE(review): eval of a user-supplied string — fine for a
                # local experiment runner, unsafe on untrusted input.
                neuron_type = eval(p.neuron_type)
            else:
                neuron_type = p.neuron_type
            if not isinstance(neuron_type, nengo.neurons.NeuronType):
                raise AttributeError('%s is not a NeuronType' % p.neuron_type)
            # Override the neuron model on every ensemble in the network.
            for ens in model.all_ensembles:
                ens.neuron_type = neuron_type
        if p.gui:
            locals_dict = getattr(self, 'locals', dict(model=model))
            import nengo_gui
            import webbrowser
            # Newer nengo_gui exposes `guibackend`; fall back to the legacy
            # GUI constructor otherwise.
            if hasattr(nengo_gui, 'guibackend'):
                host = 'localhost'
                port = 8080
                server_settings = nengo_gui.guibackend.GuiServerSettings((host, port))
                model_context = nengo_gui.guibackend.ModelContext(
                    model=model,
                    locals=locals_dict,
                    filename=sys.argv[1],
                    writeable=False)
                page_settings = nengo_gui.page.PageSettings(
                    filename_cfg=sys.argv[1] + '.cfg',
                    backend=p.backend,
                    editor_class=nengo_gui.components.editor.NoEditor)
                server = nengo_gui.gui.BaseGUI(
                    model_context, server_settings, page_settings)
                if hasattr(server.server, 'gen_one_time_token'):
                    wb = webbrowser.get().open('%s://%s:%d/?token=%s' % (
                        'http', host, port, server.server.gen_one_time_token()))
                else:
                    wb = webbrowser.get().open('%s://%s:%d/' % (
                        'http', host, port))
                server.start()
            else:
                try:
                    nengo_gui.GUI(model=model,
                                  filename=sys.argv[1],
                                  locals=locals_dict,
                                  editor=False,
                                  ).start()
                except TypeError:
                    # support nengo_gui v0.2.0 and previous
                    nengo_gui.GUI(model=model,
                                  filename=sys.argv[1],
                                  locals=locals_dict,
                                  interactive=False,
                                  allow_file_change=False,
                                  ).start()
        else:
            # Backend spec grammar: "module", "module:Class", and optional
            # trailing "(kw=val, ...)" extra simulator arguments.
            backend = p.backend
            extra_args = {}
            if backend.endswith(')') and '(' in backend:
                backend, arg_text = backend[:-1].split('(', 1)
                extra_args = eval('dict(%s)' % arg_text)
            if ':' in backend:
                backend, clsname = backend.split(':', 1)
            else:
                clsname = 'Simulator'
            module = importlib.import_module(backend)
            Simulator = getattr(module, clsname)
            try:
                # NOTE(review): inspect.getargspec was removed in Python
                # 3.11; on modern interpreters this always falls into the
                # except branch — consider getfullargspec.
                args = inspect.getargspec(Simulator.__init__)[0]
            except:
                args = []
            if (not p.verbose and 'progress_bar' in args):
                self.sim = Simulator(model, dt=p.dt, progress_bar=False,
                                     **extra_args)
            else:
                self.sim = Simulator(model, dt=p.dt, **extra_args)
            with self.sim:
                return super(NengoTrial, self).execute_trial(p)
    def do_evaluate(self, p):
        # Delegate to the subclass-provided evaluate with the live simulator.
        return self.evaluate(p, self.sim, self.plt)
    def make_model(self, **kwargs):
        """Build and return the model without running it (for inspection)."""
        p = self._create_parameters(**kwargs)
        return self.model(p)
    def model(self, p):
        """Subclasses must return a nengo.Network built from parameters p."""
        raise NotImplementedError
|
tcstewar/pytry
|
pytry/nengo.py
|
Python
|
gpl-3.0
| 4,779
|
[
"NEURON"
] |
d9a75fd699977eb5dcc544e33fdaa658b08c770f6fc399274e0203dda47cee9e
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from struct import unpack
from androguard.decompiler.dad.util import get_type
from androguard.decompiler.dad.opcode_ins import Op
from androguard.decompiler.dad.instruction import (Constant, ThisParam,
BinaryExpression,
BaseClass,
InstanceExpression,
NewInstance,
Variable,
BinaryCompExpression)
logger = logging.getLogger('dad.writer')
class Writer(object):
    """Java source-code emitter for a decompiled method's control-flow graph.

    Walks the graph as a visitor: each ``visit_*`` method appends text to
    two parallel buffers — ``buffer`` (plain strings, joined by __str__)
    and ``buffer2`` (tagged tuples for consumers that need token metadata).
    Indentation, loop/if/switch/try "follow" nodes, and pending-break state
    are tracked on the instance.
    """
    def __init__(self, graph, method):
        self.graph = graph
        self.method = method
        self.visited_nodes = set()
        # Current indentation in spaces (one level == 4).
        self.ind = 4
        self.buffer = []
        self.buffer2 = []
        # Stacks of "follow" nodes: visiting stops at the top entry so the
        # follow is emitted once, by the construct that owns it.
        self.loop_follow = [None]
        self.if_follow = [None]
        self.switch_follow = [None]
        self.latch_node = [None]
        self.try_follow = [None]
        self.next_case = None
        # When True, the next space()/write_ind() is suppressed once.
        self.skip = False
        self.need_break = True
    def __str__(self):
        return ''.join(self.buffer)
    def str_ext(self):
        # Tagged-token view of the generated source.
        return self.buffer2
    def inc_ind(self, i=1):
        self.ind += (4 * i)
    def dec_ind(self, i=1):
        self.ind -= (4 * i)
    def space(self):
        if self.skip:
            self.skip = False
            return ''
        return ' ' * self.ind
    def write_ind(self):
        if self.skip:
            self.skip = False
        else:
            self.write(self.space())
            self.write_ext(('INDENTATION', self.space()))
    def write(self, s, data=None):
        self.buffer.append(s)
        # old method, still used
        # TODO: clean?
        if data:
            self.buffer2.append((data, s))
    # at minimum, we have t as a tuple of the form:
    # (TYPE_STR, MY_STR) such as ('THIS', 'this')
    # where the 2nd field is the actual generated source code
    # We can have more fields, for example:
    # ('METHOD', 'sendToServer', 'this -> sendToServer', <androguard.decompiler.dad.instruction.ThisParam>)
    def write_ext(self, t):
        if not isinstance(t, tuple):
            # NOTE(review): raising a plain string is a TypeError at
            # runtime on any modern Python; should raise an Exception
            # instance instead.
            raise "Error in write_ext: %s not a tuple" % str(t)
        self.buffer2.append(t)
    def end_ins(self):
        # Terminate the current statement.
        self.write(';\n')
        self.write_ext(('END_INSTRUCTION', ';\n'))
    def write_ind_visit_end(self, lhs, s, rhs=None, data=None):
        # Emit "<indent><lhs><s><rhs>;" visiting operands through self.
        self.write_ind()
        lhs.visit(self)
        self.write(s)
        self.write_ext(('TODO_4343', s, data))
        if rhs is not None:
            rhs.visit(self)
        self.end_ins()
    #TODO: prefer this class as write_ind_visit_end that should be deprecated
    # at the end
    def write_ind_visit_end_ext(self, lhs, before, s, after, rhs=None,
                                data=None, subsection='UNKNOWN_SUBSECTION'):
        self.write_ind()
        lhs.visit(self)
        self.write(before + s + after)
        self.write_ext(('BEFORE', before))
        self.write_ext((subsection, s, data))
        self.write_ext(('AFTER', after))
        if rhs is not None:
            rhs.visit(self)
        self.end_ins()
    def write_inplace_if_possible(self, lhs, rhs):
        # Prefer "x++" / "x += e" over "x = x + e" when the RHS is a binary
        # expression whose first operand is the LHS itself.
        if isinstance(rhs, BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
            exp_rhs = rhs.var_map[rhs.arg2]
            if rhs.op in '+-' and isinstance(exp_rhs, Constant) and\
               exp_rhs.get_int_value() == 1:
                return self.write_ind_visit_end(lhs, rhs.op * 2, data=rhs)
            return self.write_ind_visit_end(
                lhs, ' %s= ' % rhs.op, exp_rhs, data=rhs)
        return self.write_ind_visit_end(lhs, ' = ', rhs, data=rhs)
    def visit_ins(self, ins):
        ins.visit(self)
    def write_method(self):
        """Emit the whole method: modifiers, prototype, then the body."""
        acc = []
        access = self.method.access
        self.constructor = False
        for modifier in access:
            if modifier == 'constructor':
                self.constructor = True
                continue
            acc.append(modifier)
        self.write('\n%s' % self.space())
        self.write_ext(('NEWLINE', '\n%s' % (self.space())))
        if acc:
            self.write('%s ' % ' '.join(acc))
            self.write_ext(('PROTOTYPE_ACCESS', '%s ' % ' '.join(acc)))
        if self.constructor:
            # Constructors are named after the class, with no return type.
            name = get_type(self.method.cls_name).split('.')[-1]
            self.write(name)
            self.write_ext(('NAME_METHOD_PROTOTYPE', '%s' % name, self.method))
        else:
            self.write(
                '%s %s' % (get_type(self.method.type), self.method.name))
            self.write_ext(
                ('PROTOTYPE_TYPE', '%s' % get_type(self.method.type)))
            self.write_ext(('SPACE', ' '))
            self.write_ext(
                ('NAME_METHOD_PROTOTYPE',
                 '%s' % self.method.name, self.method))
        params = self.method.lparams
        if 'static' not in access:
            # Drop the implicit `this` parameter for instance methods.
            params = params[1:]
        proto = ''
        self.write_ext(('PARENTHESIS_START', '('))
        if self.method.params_type:
            proto = ', '.join(['%s p%s' % (get_type(p_type), param) for
                               p_type, param in zip(self.method.params_type, params)])
            first = True
            for p_type, param in zip(self.method.params_type, params):
                if not first:
                    self.write_ext(('COMMA', ', '))
                else:
                    first = False
                self.write_ext(('ARG_TYPE', '%s' % get_type(p_type)))
                self.write_ext(('SPACE', ' '))
                self.write_ext(
                    ('NAME_ARG', 'p%s' % param, p_type, self.method))
        self.write_ext(('PARENTHESIS_END', ')'))
        self.write('(%s)' % proto)
        if self.graph is None:
            # Abstract/native method: no body available.
            self.write(';\n')
            self.write_ext(('METHOD_END_NO_CONTENT', ';\n'))
            return
        self.write('\n%s{\n' % self.space())
        self.write_ext(('METHOD_START', '\n%s{\n' % self.space()))
        self.inc_ind()
        self.visit_node(self.graph.entry)
        self.dec_ind()
        self.write('%s}\n' % self.space())
        self.write_ext(('METHOD_END', '%s}\n' % self.space()))
    def visit_node(self, node):
        # Follow nodes are emitted by the construct that owns them, not here.
        if node in (self.if_follow[-1], self.switch_follow[-1],
                    self.loop_follow[-1], self.latch_node[-1],
                    self.try_follow[-1]):
            return
        if not node.type.is_return and node in self.visited_nodes:
            return
        self.visited_nodes.add(node)
        for var in node.var_to_declare:
            var.visit_decl(self)
            var.declared = True
        node.visit(self)
    def visit_loop_node(self, loop):
        """Emit while / do-while / while(true) depending on the loop type."""
        follow = loop.follow['loop']
        if follow is None and not loop.looptype.is_endless:
            logger.error('Loop has no follow !')
        if loop.looptype.is_pretest:
            if loop.true is follow:
                # Normalize so the loop body is the true branch.
                loop.neg()
                loop.true, loop.false = loop.false, loop.true
            self.write('%swhile (' % self.space())
            self.write_ext(('WHILE', '%swhile (' % self.space()))
            loop.visit_cond(self)
            self.write(') {\n')
            self.write_ext(('WHILE_START', ') {\n'))
        elif loop.looptype.is_posttest:
            self.write('%sdo {\n' % self.space())
            self.write_ext(('DO', '%sdo {\n' % self.space()))
            self.latch_node.append(loop.latch)
        elif loop.looptype.is_endless:
            self.write('%swhile(true) {\n' % self.space())
            self.write_ext(('WHILE_TRUE', '%swhile(true) {\n' % self.space()))
        self.inc_ind()
        self.loop_follow.append(follow)
        if loop.looptype.is_pretest:
            self.visit_node(loop.true)
        else:
            self.visit_node(loop.cond)
        self.loop_follow.pop()
        self.dec_ind()
        if loop.looptype.is_pretest:
            self.write('%s}\n' % self.space())
            self.write_ext(('END_PRETEST', '%s}\n' % self.space()))
        elif loop.looptype.is_posttest:
            self.latch_node.pop()
            self.write('%s} while(' % self.space())
            self.write_ext(('WHILE_POSTTEST', '%s} while(' % self.space()))
            loop.latch.visit_cond(self)
            self.write(');\n')
            self.write_ext(('POSTTEST_END', ');\n'))
        else:
            self.inc_ind()
            self.visit_node(loop.latch)
            self.dec_ind()
            self.write('%s}\n' % self.space())
            self.write_ext(('END_LOOP', '%s}\n' % self.space()))
        if follow is not None:
            self.visit_node(follow)
    def visit_cond_node(self, cond):
        """Emit an if/else; branches may be negated to simplify output."""
        follow = cond.follow['if']
        if cond.false is cond.true:
            self.write('%s// Both branches of the condition point to the same'
                       ' code.\n' % self.space())
            self.write_ext(
                ('COMMENT_ERROR_MSG',
                 '%s// Both branches of the condition point to the same'
                 ' code.\n' % self.space()))
            self.write('%s// if (' % self.space())
            self.write_ext(('COMMENT_IF', '%s// if (' % self.space()))
            cond.visit_cond(self)
            self.write(') {\n')
            self.write_ext(('COMMENT_COND_END', ') {\n'))
            self.inc_ind()
            self.visit_node(cond.true)
            self.dec_ind()
            self.write('%s// }\n' % self.space(), data="COMMENT_IF_COND_END")
            return
        if cond.false is self.loop_follow[-1]:
            # Loop exit on the false branch: invert so we emit "if (...) break".
            cond.neg()
            cond.true, cond.false = cond.false, cond.true
        if self.loop_follow[-1] in (cond.true, cond.false):
            self.write('%sif (' % self.space(), data="IF_2")
            cond.visit_cond(self)
            self.write(') {\n', data="IF_TRUE_2")
            self.inc_ind()
            self.write('%sbreak;\n' % self.space(), data="BREAK")
            self.dec_ind()
            self.write('%s}\n' % self.space(), data="IF_END_2")
            self.visit_node(cond.false)
        elif follow is not None:
            if cond.true in (follow, self.next_case) or\
               cond.num > cond.true.num:
                # or cond.true.num > cond.false.num:
                cond.neg()
                cond.true, cond.false = cond.false, cond.true
            self.if_follow.append(follow)
            if cond.true:  # in self.visited_nodes:
                self.write('%sif (' % self.space(), data="IF")
                cond.visit_cond(self)
                self.write(') {\n', data="IF_TRUE")
                self.inc_ind()
                self.visit_node(cond.true)
                self.dec_ind()
            is_else = not (follow in (cond.true, cond.false))
            if is_else and not cond.false in self.visited_nodes:
                self.write('%s} else {\n' % self.space(), data="IF_FALSE")
                self.inc_ind()
                self.visit_node(cond.false)
                self.dec_ind()
            self.if_follow.pop()
            self.write('%s}\n' % self.space(), data="IF_END")
            self.visit_node(follow)
        else:
            self.write('%sif (' % self.space(), data="IF_3")
            cond.visit_cond(self)
            self.write(') {\n', data="IF_COND_3")
            self.inc_ind()
            self.visit_node(cond.true)
            self.dec_ind()
            self.write('%s} else {\n' % self.space(), data="ELSE_3")
            self.inc_ind()
            self.visit_node(cond.false)
            self.dec_ind()
            self.write('%s}\n' % self.space(), data="IF_END_3")
    def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
        # Emit "(c1) || (c2)" or "(c1) && (c2)", negating cond1 if requested.
        if nnot:
            cond1.neg()
        self.write('(', data="TODO24")
        cond1.visit_cond(self)
        self.write(') %s (' % ['||', '&&'][aand], data="TODO25")
        cond2.visit_cond(self)
        self.write(')', data="TODO26")
    def visit_switch_node(self, switch):
        """Emit a switch with its cases, default, and fallthrough breaks."""
        lins = switch.get_ins()
        for ins in lins[:-1]:
            self.visit_ins(ins)
        switch_ins = switch.get_ins()[-1]
        self.write('%sswitch (' % self.space(), data="SWITCH")
        self.visit_ins(switch_ins)
        self.write(') {\n', data="SWITCH_END")
        follow = switch.follow['switch']
        cases = switch.cases
        self.switch_follow.append(follow)
        default = switch.default
        for i, node in enumerate(cases):
            if node in self.visited_nodes:
                continue
            self.inc_ind()
            for case in switch.node_to_case[node]:
                self.write(
                    '%scase %d:\n' % (self.space(), case), data="CASE_XX")
            if i + 1 < len(cases):
                self.next_case = cases[i + 1]
            else:
                self.next_case = None
            if node is default:
                self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT")
                default = None
            self.inc_ind()
            self.visit_node(node)
            if self.need_break:
                self.write('%sbreak;\n' % self.space(), data="CASE_BREAK")
            else:
                self.need_break = True
            self.dec_ind(2)
        if default not in (None, follow):
            self.inc_ind()
            self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT_2")
            self.inc_ind()
            self.visit_node(default)
            self.dec_ind(2)
        self.write('%s}\n' % self.space(), data="CASE_END")
        self.switch_follow.pop()
        self.visit_node(follow)
    def visit_statement_node(self, stmt):
        sucs = self.graph.sucs(stmt)
        for ins in stmt.get_ins():
            self.visit_ins(ins)
        if len(sucs) == 1:
            if sucs[0] is self.loop_follow[-1]:
                self.write('%sbreak;\n' % self.space(), data="BREAK_2")
            elif sucs[0] is self.next_case:
                # Fallthrough into the next case: suppress the break.
                self.need_break = False
            else:
                self.visit_node(sucs[0])
    def visit_try_node(self, try_node):
        self.write('%stry {\n' % self.space(), data="TRY_START")
        self.inc_ind()
        self.try_follow.append(try_node.follow)
        self.visit_node(try_node.try_start)
        self.dec_ind()
        self.write('%s}' % self.space(), data="TRY_START_END")
        for catch in try_node.catch:
            self.visit_node(catch)
        self.write('\n', data="NEWLINE_END_TRY")
        self.visit_node(self.try_follow.pop())
    def visit_catch_node(self, catch_node):
        self.write(' catch (', data="CATCH")
        catch_node.visit_exception(self)
        self.write(') {\n', data="CATCH_START")
        self.inc_ind()
        self.visit_node(catch_node.catch_start)
        self.dec_ind()
        self.write('%s}' % self.space(), data="CATCH_END")
    def visit_return_node(self, ret):
        self.need_break = False
        for ins in ret.get_ins():
            self.visit_ins(ins)
    def visit_throw_node(self, throw):
        for ins in throw.get_ins():
            self.visit_ins(ins)
    def visit_decl(self, var):
        # Declare a local ("Type vN") unless it was already declared.
        if not var.declared:
            var_type = var.get_type() or 'unknownType'
            self.write('%s%s v%s' % (
                self.space(), get_type(var_type),
                var.name), data="DECLARATION")
            self.end_ins()
    def visit_constant(self, cst):
        if isinstance(cst, str):
            return self.write(string(cst), data="CONSTANT_STRING")
        self.write('%r' % cst, data="CONSTANT_INTEGER")  # INTEGER or also others?
    def visit_base_class(self, cls, data=None):
        self.write(cls)
        self.write_ext(('NAME_BASE_CLASS', cls, data))
    def visit_variable(self, var):
        var_type = var.get_type() or 'unknownType'
        if not var.declared:
            # First use doubles as the declaration.
            self.write('%s ' % get_type(var_type))
            self.write_ext(
                ('VARIABLE_TYPE', '%s' % get_type(var_type), var_type))
            self.write_ext(('SPACE', ' '))
            var.declared = True
        self.write('v%s' % var.name)
        self.write_ext(('NAME_VARIABLE', 'v%s' % var.name, var, var_type))
    def visit_param(self, param, data=None):
        self.write('p%s' % param)
        self.write_ext(('NAME_PARAM', 'p%s' % param, data))
    def visit_this(self):
        self.write('this', data="THIS")
    def visit_assign(self, lhs, rhs):
        if lhs is not None:
            return self.write_inplace_if_possible(lhs, rhs)
        # Expression statement with no assignment target.
        self.write_ind()
        rhs.visit(self)
        if not self.skip:
            self.end_ins()
    def visit_move_result(self, lhs, rhs):
        self.write_ind_visit_end(lhs, ' = ', rhs)
    def visit_move(self, lhs, rhs):
        if lhs is not rhs:
            self.write_inplace_if_possible(lhs, rhs)
    def visit_astore(self, array, index, rhs, data=None):
        # array[index] = rhs;
        self.write_ind()
        array.visit(self)
        self.write('[', data=("ASTORE_START", data))
        index.visit(self)
        self.write('] = ', data="ASTORE_END")
        rhs.visit(self)
        self.end_ins()
    def visit_put_static(self, cls, name, rhs):
        self.write_ind()
        self.write('%s.%s = ' % (cls, name), data="STATIC_PUT")
        rhs.visit(self)
        self.end_ins()
    def visit_put_instance(self, lhs, name, rhs, data=None):
        self.write_ind_visit_end_ext(
            lhs, '.', '%s' % name, ' = ', rhs,
            data=data, subsection='NAME_CLASS_ASSIGNMENT')
    def visit_new(self, atype, data=None):
        self.write('new %s' % get_type(atype))
        self.write_ext(('NEW', 'new '))
        self.write_ext(
            ('NAME_CLASS_NEW', '%s' % get_type(atype), data.type, data))
    def visit_invoke(self, name, base, ptype, rtype, args, invokeInstr=None):
        """Emit a method call; implicit this.<init>() calls are elided."""
        if isinstance(base, ThisParam):
            if name == '<init>' and self.constructor and len(args) == 0:
                self.skip = True
                return
        base.visit(self)
        if name != '<init>':
            # Resolve a human-readable "receiver -> method" label for the
            # extended buffer, depending on the receiver's expression kind.
            if isinstance(base, BaseClass):
                call_name = "%s -> %s" % (base.cls, name)
            elif isinstance(base, InstanceExpression):
                call_name = "%s -> %s" % (base.ftype, name)
            elif hasattr(base, "base") and hasattr(base, "var_map"):
                base2base = base
                while True:
                    base2base = base2base.var_map[base2base.base]
                    if isinstance(base2base, NewInstance):
                        call_name = "%s -> %s" % (base2base.type, name)
                        break
                    elif (hasattr(base2base, "base") and
                          hasattr(base2base, "var_map")):
                        continue
                    else:
                        call_name = "UNKNOWN_TODO"
                        break
            elif isinstance(base, ThisParam):
                call_name = "this -> %s" % name
            elif isinstance(base, Variable):
                call_name = "%s -> %s" % (base.type, name)
            else:
                call_name = "UNKNOWN_TODO2"
            self.write('.%s' % name)
            self.write_ext(('INVOKE', '.'))
            self.write_ext(
                ('NAME_METHOD_INVOKE',
                 '%s' % name, call_name, ptype, rtype, base, invokeInstr))
        self.write('(', data="PARAM_START")
        comma = False
        for arg in args:
            if comma:
                self.write(', ', data="PARAM_SEPARATOR")
            comma = True
            arg.visit(self)
        self.write(')', data="PARAM_END")
    def visit_return_void(self):
        self.write_ind()
        self.write('return', data="RETURN")
        self.end_ins()
    def visit_return(self, arg):
        self.write_ind()
        self.write('return ', data="RETURN")
        arg.visit(self)
        self.end_ins()
    def visit_nop(self):
        pass
    def visit_switch(self, arg):
        arg.visit(self)
    def visit_check_cast(self, arg, atype):
        self.write('((%s) ' % atype, data="CHECKCAST")
        arg.visit(self)
        self.write(')')
    def visit_aload(self, array, index):
        # array[index]
        array.visit(self)
        self.write('[', data="ALOAD_START")
        index.visit(self)
        self.write(']', data="ALOAD_END")
    def visit_alength(self, array):
        array.visit(self)
        self.write('.length', data="ARRAY_LENGTH")
    def visit_new_array(self, atype, size):
        self.write('new %s[' % get_type(atype[1:]), data="NEW_ARRAY")
        size.visit(self)
        self.write(']', data="NEW_ARRAY_END")
    def visit_filled_new_array(self, atype, size, args):
        # NOTE(review): opens with '{' but closes with '})' — the emitted
        # Java is unbalanced; confirm against expected decompiler output.
        self.write('new %s {' % get_type(atype), data="NEW_ARRAY_FILLED")
        for idx, arg in enumerate(args):
            arg.visit(self)
            if idx + 1 < len(args):
                self.write(', ', data="COMMA")
        self.write('})', data="NEW_ARRAY_FILLED_END")
    def visit_fill_array(self, array, value):
        """Emit "array = {v0, v1, ...};" from a fill-array-data payload."""
        self.write_ind()
        array.visit(self)
        self.write(' = {', data="ARRAY_FILLED")
        data = value.get_data()
        tab = []
        elem_size = value.element_width
        if elem_size == 4:
            for i in range(0, value.size * 4, 4):
                tab.append('%s' % unpack('i', data[i:i + 4])[0])
        else:  # FIXME: other cases
            # NOTE(review): on Python 3 `data[i]` is an int, not bytes, so
            # unpack('b', data[i]) would raise — Python 2 era code.
            for i in range(value.size):
                tab.append('%s' % unpack('b', data[i])[0])
        self.write(', '.join(tab), data="COMMA")
        self.write('}', data="ARRAY_FILLED_END")
        self.end_ins()
    def visit_move_exception(self, var, data=None):
        var.declared = True
        var_type = var.get_type() or 'unknownType'
        self.write('%s v%s' % (get_type(var_type), var.name))
        self.write_ext(
            ('EXCEPTION_TYPE', '%s' % get_type(var_type), data.type))
        self.write_ext(('SPACE', ' '))
        self.write_ext(
            ('NAME_CLASS_EXCEPTION', 'v%s' % var.value(), data.type, data))
    def visit_monitor_enter(self, ref):
        # Opens a synchronized block; visit_monitor_exit closes it.
        self.write_ind()
        self.write('synchronized(', data="SYNCHRONIZED")
        ref.visit(self)
        self.write(') {\n', data="SYNCHRONIZED_END")
        self.inc_ind()
    def visit_monitor_exit(self, ref):
        self.dec_ind()
        self.write_ind()
        self.write('}\n', data="MONITOR_EXIT")
    def visit_throw(self, ref):
        self.write_ind()
        self.write('throw ', data="THROW")
        ref.visit(self)
        self.end_ins()
    def visit_binary_expression(self, op, arg1, arg2):
        self.write('(', data="BINARY_EXPRESSION_START")
        arg1.visit(self)
        self.write(' %s ' % op, data="TODO58")
        arg2.visit(self)
        self.write(')', data="BINARY_EXPRESSION_END")
    def visit_unary_expression(self, op, arg):
        self.write('(%s ' % op, data="UNARY_EXPRESSION_START")
        arg.visit(self)
        self.write(')', data="UNARY_EXPRESSION_END")
    def visit_cast(self, op, arg):
        self.write('(%s ' % op, data="CAST_START")
        arg.visit(self)
        self.write(')', data="CAST_END")
    def visit_cond_expression(self, op, arg1, arg2):
        arg1.visit(self)
        self.write(' %s ' % op, data="COND_EXPRESSION")
        arg2.visit(self)
    def visit_condz_expression(self, op, arg):
        """Emit a compare-with-zero: '!x', 'x == 0', or 'x == null'."""
        if isinstance(arg, BinaryCompExpression):
            arg.op = op
            return arg.visit(self)
        atype = arg.get_type()
        if atype == 'Z':
            # Boolean: "x == 0" reads as "!x".
            if op == Op.EQUAL:
                self.write('!', data="NEGATE")
            arg.visit(self)
        else:
            arg.visit(self)
            # Primitive descriptors compare to 0; references compare to null.
            if atype in 'VBSCIJFD':
                self.write(' %s 0' % op, data="TODO64")
            else:
                self.write(' %s null' % op, data="TODO65")
    def visit_get_instance(self, arg, name, data=None):
        arg.visit(self)
        self.write('.%s' % name)
        self.write_ext(('GET_INSTANCE', '.'))
        self.write_ext(('NAME_CLASS_INSTANCE', '%s' % name, data))
    def visit_get_static(self, cls, name):
        self.write('%s.%s' % (cls, name), data="GET_STATIC")
def string(s):
    """Return *s* as a double-quoted Java string literal with escapes.

    Accepts either str or UTF-8 encoded bytes (the original Python-2 code
    always called ``s.decode('utf8')``, which no longer exists on str).
    Printable ASCII is kept as-is (with ``"``, ``'`` and ``\\`` backslash-
    escaped), ``\\r``/``\\n``/``\\t`` become their two-character escapes,
    and everything else becomes a ``\\uXXXX`` escape.

    Returns a str so it can be appended to the Writer's string buffer on
    Python 3 (on Python 2 the original returned a utf-8 byte string).
    """
    if isinstance(s, bytes):
        # Decompiled constants may arrive as raw UTF-8 bytes.
        s = s.decode('utf8')
    ret = ['"']
    for c in s:
        if ' ' <= c < '\x7f':
            if c in ('"', "'", '\\'):
                ret.append('\\')
            ret.append(c)
        elif c in ('\r', '\n', '\t'):
            # e.g. '\n' -> '\\n'
            ret.append(c.encode('unicode-escape').decode('ascii'))
        else:
            # Control characters and non-ASCII: \uXXXX (BMP code points).
            ret.append('\\u%04x' % ord(c))
    ret.append('"')
    return ''.join(ret)
|
subho007/androguard
|
androguard/decompiler/dad/writer.py
|
Python
|
apache-2.0
| 25,567
|
[
"VisIt"
] |
4c7da8555082f4aeec6c1c06c21914abd67ff374b3dd486770cbac25fb08a76b
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch.utils.data import TensorDataset, DataLoader
import types
import numpy as np
import math
import pandas as pd
import tempfile
import os
from bigdl.orca.automl.model.abstract import BaseModel, ModelBuilder
from bigdl.orca.automl.metrics import Evaluator
from bigdl.orca.automl.pytorch_utils import LR_NAME, DEFAULT_LR
# Friendly regression-loss names mapped to torch.nn loss class names
# (presumably resolved by name elsewhere in the framework — confirm).
PYTORCH_REGRESSION_LOSS_MAP = {"mse": "MSELoss",
                               "mae": "L1Loss",
                               "huber_loss": "SmoothL1Loss"}
class PytorchBaseModel(BaseModel):
    def __init__(self, model_creator, optimizer_creator, loss_creator,
                 check_optional_config=False):
        """Store the creator callables; no model is built until build().

        model_creator -- callable(config) -> torch.nn.Module
        optimizer_creator -- callable, optimizer class, or named optimizer
        loss_creator -- torch loss instance or callable(config) -> loss
        check_optional_config -- whether _check_config validates optional keys
        """
        self.check_optional_config = check_optional_config
        self.model_creator = model_creator
        self.optimizer_creator = optimizer_creator
        self.loss_creator = loss_creator
        # Populated by build(); ONNX counterparts are built lazily elsewhere.
        self.config = None
        self.model = None
        self.model_built = False
        self.onnx_model = None
        self.onnx_model_built = False
    def _create_loss(self):
        """Set self.criterion from loss_creator (instance or factory)."""
        if isinstance(self.loss_creator, torch.nn.modules.loss._Loss):
            # Already a loss instance — use it directly.
            self.criterion = self.loss_creator
        else:
            # Otherwise treat it as a factory taking the config dict.
            self.criterion = self.loss_creator(self.config)
def _create_optimizer(self):
import types
if isinstance(self.optimizer_creator, types.FunctionType):
self.optimizer = self.optimizer_creator(self.model, self.config)
else:
# use torch default parameter values if user pass optimizer name or optimizer class.
try:
self.optimizer = self.optimizer_creator(self.model.parameters(),
lr=self.config.get(LR_NAME, DEFAULT_LR))
except:
raise ValueError("We failed to generate an optimizer with specified optim "
"class/name. You need to pass an optimizer creator function.")
def build(self, config):
# check config and update
self._check_config(**config)
self.config = config
# build model
if "selected_features" in config:
config["input_feature_num"] = len(config['selected_features'])\
+ config['output_feature_num']
self.model = self.model_creator(config)
if not isinstance(self.model, torch.nn.Module):
raise ValueError("You must create a torch model in model_creator")
self.model_built = True
self._create_loss()
self._create_optimizer()
def _reshape_input(self, x):
if x.ndim == 1:
x = x.reshape(-1, 1)
return x
def _np_to_creator(self, data):
def data_creator(config):
x, y = PytorchBaseModel.covert_input(data)
x = self._reshape_input(x)
y = self._reshape_input(y)
return DataLoader(TensorDataset(x, y),
batch_size=int(config["batch_size"]),
shuffle=True)
return data_creator
def fit_eval(self, data, validation_data=None, mc=False, verbose=0, epochs=1, metric=None,
metric_func=None, resources_per_trial=None,
**config):
"""
:param data: data could be a tuple with numpy ndarray with form (x, y) or
a PyTorch DataLoader or a data creator which takes a config dict and returns a
torch.utils.data.DataLoader. torch.Tensor should be generated from the
dataloader.
:param validation_data: validation data could be a tuple with numpy ndarray
with form (x, y), a PyTorch DataLoader or a data creator which takes
a config dict and returns a torch.utils.data.DataLoader. torch.Tensor
should be generated from the dataloader.
fit_eval will build a model at the first time it is built
config will be updated for the second or later times with only non-model-arch
params be functional
TODO: check the updated params and decide if the model is needed to be rebuilt
"""
# todo: support input validation data None
assert validation_data is not None, "You must input validation data!"
if not metric:
raise ValueError("You must input a valid metric value for fit_eval.")
# resources_per_trial
if resources_per_trial is not None:
torch.set_num_threads(resources_per_trial["cpu"])
os.environ["OMP_NUM_THREADS"] = str(resources_per_trial["cpu"])
# update config settings
def update_config():
if not isinstance(data, types.FunctionType) and not isinstance(data, DataLoader):
x = self._reshape_input(data[0])
y = self._reshape_input(data[1])
config.setdefault("past_seq_len", x.shape[-2])
config.setdefault("future_seq_len", y.shape[-2])
config.setdefault("input_feature_num", x.shape[-1])
config.setdefault("output_feature_num", y.shape[-1])
if not self.model_built:
update_config()
self.build(config)
else:
tmp_config = self.config.copy()
tmp_config.update(config)
self._check_config(**tmp_config)
self.config.update(config)
# get train_loader and validation_loader
if isinstance(data, types.FunctionType):
train_loader = data(self.config)
validation_loader = validation_data(self.config)
elif isinstance(data, DataLoader):
train_loader = data
assert isinstance(validation_data, DataLoader)
validation_loader = validation_data
else:
assert isinstance(data, tuple) and isinstance(validation_data, tuple),\
f"data/validation_data should be a tuple or\
data creator function but found {type(data)}"
assert isinstance(data[0], np.ndarray) and isinstance(validation_data[0], np.ndarray),\
f"Data and validation_data should be a tuple of np.ndarray " \
f"but found {type(data[0])} as the first element of data."
assert isinstance(data[1], np.ndarray) and isinstance(validation_data[1], np.ndarray),\
f"Data and validation_data should be a tuple of np.ndarray " \
f"but found {type(data[1])} as the second element of data."
train_data_creator = self._np_to_creator(data)
valid_data_creator = self._np_to_creator(validation_data)
train_loader = train_data_creator(self.config)
validation_loader = valid_data_creator(self.config)
epoch_losses = []
for i in range(epochs):
train_loss = self._train_epoch(train_loader)
epoch_losses.append(train_loss)
train_stats = {"loss": np.mean(epoch_losses), "last_loss": epoch_losses[-1]}
val_stats = self._validate(validation_loader, metric_name=metric, metric_func=metric_func)
self.onnx_model_built = False
return val_stats
@staticmethod
def to_torch(inp):
if isinstance(inp, np.ndarray):
return torch.from_numpy(inp)
if isinstance(inp, (pd.DataFrame, pd.Series)):
return torch.from_numpy(inp.values)
return inp
@staticmethod
def covert_input(data):
x = PytorchBaseModel.to_torch(data[0]).float()
y = PytorchBaseModel.to_torch(data[1]).float()
return x, y
def _train_epoch(self, train_loader):
self.model.train()
total_loss = 0
batch_idx = 0
for x_batch, y_batch in train_loader:
self.optimizer.zero_grad()
yhat = self._forward(x_batch, y_batch)
loss = self.criterion(yhat, y_batch)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
batch_idx += 1
train_loss = total_loss/batch_idx
return train_loss
def _forward(self, x, y):
return self.model(x)
def _validate(self, validation_loader, metric_name, metric_func=None):
if not metric_name:
assert metric_func, "You must input valid metric_func or metric_name"
metric_name = metric_func.__name__
self.model.eval()
with torch.no_grad():
yhat_list = []
y_list = []
for x_valid_batch, y_valid_batch in validation_loader:
yhat_list.append(self.model(x_valid_batch).numpy())
y_list.append(y_valid_batch.numpy())
yhat = np.concatenate(yhat_list, axis=0)
y = np.concatenate(y_list, axis=0)
# val_loss = self.criterion(yhat, y)
if metric_func:
eval_result = metric_func(y, yhat)
else:
eval_result = Evaluator.evaluate(metric=metric_name,
y_true=y, y_pred=yhat,
multioutput='uniform_average')
return {metric_name: eval_result}
def _print_model(self):
# print model and parameters
print(self.model)
print(len(list(self.model.parameters())))
for i in range(len(list(self.model.parameters()))):
print(list(self.model.parameters())[i].size())
def evaluate(self, x, y, metrics=['mse'], multioutput="raw_values", batch_size=32):
# reshape 1dim input
x = self._reshape_input(x)
y = self._reshape_input(y)
yhat = self.predict(x, batch_size=batch_size)
eval_result = [Evaluator.evaluate(m, y_true=y, y_pred=yhat, multioutput=multioutput)
for m in metrics]
return eval_result
def predict(self, x, mc=False, batch_size=32):
# reshape 1dim input
x = self._reshape_input(x)
if not self.model_built:
raise RuntimeError("You must call fit_eval or restore first before calling predict!")
x = PytorchBaseModel.to_torch(x).float()
if mc:
self.model.train()
else:
self.model.eval()
test_loader = DataLoader(TensorDataset(x),
batch_size=int(batch_size))
y_list = []
with torch.no_grad():
for x_test_batch in test_loader:
y_list.append(self.model(x_test_batch[0]).numpy())
yhat = np.concatenate(y_list, axis=0)
return yhat
def predict_with_uncertainty(self, x, n_iter=100):
result = np.zeros((n_iter,) + (x.shape[0], self.config["output_feature_num"]))
for i in range(n_iter):
result[i, :, :] = self.predict(x, mc=True)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def state_dict(self):
state = {
"config": self.config,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
return state
def load_state_dict(self, state):
self.config = state["config"]
self.model = self.model_creator(self.config)
self.model.load_state_dict(state["model"])
self.model_built = True
self._create_optimizer()
self.optimizer.load_state_dict(state["optimizer"])
self._create_loss()
def save(self, checkpoint):
if not self.model_built:
raise RuntimeError("You must call fit_eval or restore first before calling save!")
state_dict = self.state_dict()
torch.save(state_dict, checkpoint)
def restore(self, checkpoint):
state_dict = torch.load(checkpoint)
self.load_state_dict(state_dict)
def evaluate_with_onnx(self, x, y, metrics=['mse'], dirname=None,
multioutput="raw_values", batch_size=32):
# reshape 1dim input
x = self._reshape_input(x)
y = self._reshape_input(y)
yhat = self.predict_with_onnx(x, dirname=dirname, batch_size=batch_size)
eval_result = [Evaluator.evaluate(m, y_true=y, y_pred=yhat, multioutput=multioutput)
for m in metrics]
return eval_result
def _build_onnx(self, x, dirname=None, thread_num=None, sess_options=None):
if not self.model_built:
raise RuntimeError("You must call fit_eval or restore\
first before calling onnx methods!")
try:
import onnx
import onnxruntime
except:
raise RuntimeError("You should install onnx and onnxruntime to use onnx based method.")
if dirname is None:
dirname = tempfile.mkdtemp(prefix="onnx_cache_")
# code adapted from
# https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
torch.onnx.export(self.model,
x,
os.path.join(dirname, "cache.onnx"),
export_params=True,
opset_version=10,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
self.onnx_model = onnx.load(os.path.join(dirname, "cache.onnx"))
onnx.checker.check_model(self.onnx_model)
if sess_options is None:
sess_options = onnxruntime.SessionOptions()
if thread_num is not None:
sess_options.intra_op_num_threads = thread_num
self.ort_session = onnxruntime.InferenceSession(os.path.join(dirname, "cache.onnx"),
sess_options=sess_options)
self.onnx_model_built = True
def predict_with_onnx(self, x, mc=False, dirname=None, batch_size=32):
# reshape 1dim input
x = self._reshape_input(x)
if not self.onnx_model_built:
x_torch_tensor = PytorchBaseModel.to_torch(x[0:1]).float()
self._build_onnx(x_torch_tensor, dirname=dirname)
yhat_list = []
sample_num = x.shape[0]
batch_num = math.ceil(sample_num/batch_size)
for batch_id in range(batch_num):
ort_inputs = {self.ort_session.get_inputs()[0].name: x[batch_id*batch_size:
(batch_id+1)*batch_size]}
ort_outs = self.ort_session.run(None, ort_inputs)
yhat_list.append(ort_outs[0])
yhat = np.concatenate(yhat_list, axis=0)
return yhat
def _get_required_parameters(self):
return {}
def _get_optional_parameters(self):
return {"batch_size",
LR_NAME,
"dropout",
"optim",
"loss"
}
class PytorchModelBuilder(ModelBuilder):
    """Factory for :class:`PytorchBaseModel` instances.

    The optimizer and loss creators are validated once at construction time,
    so every model built afterwards receives known-good creators.
    """

    def __init__(self, model_creator,
                 optimizer_creator,
                 loss_creator):
        from bigdl.orca.automl.pytorch_utils import validate_pytorch_loss, validate_pytorch_optim
        self.model_creator = model_creator
        self.optimizer_creator = validate_pytorch_optim(optimizer_creator)
        self.loss_creator = validate_pytorch_loss(loss_creator)

    def build(self, config):
        """Construct a PytorchBaseModel, build it from ``config`` and return it."""
        built = PytorchBaseModel(self.model_creator,
                                 self.optimizer_creator,
                                 self.loss_creator)
        built.build(config)
        return built
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/automl/model/base_pytorch_model.py
|
Python
|
apache-2.0
| 16,322
|
[
"ORCA"
] |
09a9f57c164ff43e91e905b117bf43c8123ef58ae01fa26106b3f06484a1027f
|
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import time
import threading
import subprocess
import json
import sys
import os
import os.path
import string
import uuid
import urllib2
import tempfile
import datetime
# monkey patch py2.7's check_output() into py2.6's missing one :)
if "check_output" not in dir( subprocess ):  # duck punch it in!
    def f(*popenargs, **kwargs):
        """Backport of subprocess.check_output(): run a command, return its
        stdout, and raise CalledProcessError on a non-zero exit status."""
        if 'stdout' in kwargs:
            raise ValueError('stdout argument not allowed, it will be overridden.')
        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            # Bug fix: CalledProcessError lives in the subprocess module; the
            # unqualified name raised a NameError instead of the real error.
            raise subprocess.CalledProcessError(retcode, cmd)
        return output
    subprocess.check_output = f
# Load the launcher configuration (Python 2 ConfigParser) and extend the
# module search path with every entry from the optional [PyPath] section.
import ConfigParser
config = ConfigParser.SafeConfigParser()
config.read ( './c4a-fe.conf' )
if config.has_section ( 'PyPath' ):
    for p in config.items ( 'PyPath' ):
        # p is an (option, value) pair; only the option name is used as a path.
        sys.path.append ( p [ 0 ] )
        print "Python module path - adding '", p [ 0 ], "'"
else:
    print "WARN: Config is missing [PyPath] section"

# Local file holding the player's profile (prid line + JSON body).
PRIDFILENAME = "c4a-prof"
class Frontend:
    """GTK (pygtk 2.x) front-end for Compo4All.

    Manages the local player profile file, pulls the banner/game list/high
    scores from the server (via shelled-out curl-style commands configured in
    c4a-fe.conf), syncs high-score files around emulator runs, and launches
    the emulator for the selected game.
    """

    def cb_newedit_profile ( self, widget, data=None, edit=False ):
        """Show the create/edit-profile dialog; on OK, write and push the profile.

        With edit=True the fields are pre-filled from the existing profile and
        its prid is preserved; otherwise a fresh uuid4 prid is generated.
        """
        prid = None
        self.new_prof_d = gtk.Dialog ( title="Create Profile", parent=None, flags=0, buttons=None )
        self.new_prof_ok_b = gtk.Button ( "OK" )
        self.new_prof_ok_b.connect ( "clicked", self.cb_new_profile_ok, None )
        self.new_prof_cancel_b = gtk.Button ( "Cancel" )
        self.new_prof_cancel_b.connect ( "clicked", self.cb_new_profile_cancel, None )
        self.new_prof_table_l = gtk.Table ( rows = 4, columns = 3 )
        # labels
        self.new_prof_shortname_l = gtk.Label()
        self.new_prof_shortname_l.set_markup ( "3 Letter Nick (ex <b>EVD</b>)" )
        self.new_prof_shortname_l.show()
        self.new_prof_shortname_l.set_alignment ( xalign=0.0, yalign=0.5 )
        self.new_prof_longname_l = gtk.Label()
        self.new_prof_longname_l.set_markup ( "Display Nick (ex <b>EvilDragon</b>)" )
        self.new_prof_longname_l.show()
        self.new_prof_longname_l.set_alignment ( xalign=0.0, yalign=0.5 )
        self.new_prof_password_l = gtk.Label()
        self.new_prof_password_l.set_markup ( "Alphanumeric Password (ex <b>aBc123</b>)" )
        self.new_prof_password_l.show()
        self.new_prof_password_l.set_alignment ( xalign=0.0, yalign=0.5 )
        self.new_prof_email_l = gtk.Label()
        self.new_prof_email_l.set_markup ( "Email Address for score\nbeing beaten notifications (ex <b>billg@microsoft.com.</b>)" )
        self.new_prof_email_l.show()
        self.new_prof_email_l.set_alignment ( xalign=0.0, yalign=0.5 )
        # text entry fields
        self.new_prof_shortname_e = gtk.Entry ( max = 3 )
        self.new_prof_shortname_e.connect ( "insert_text", self.cb_password_insert_handler )
        self.new_prof_longname_e = gtk.Entry ( max = 32 )
        self.new_prof_longname_e.connect ( "insert_text", self.cb_password_insert_handler )
        self.new_prof_password_e = gtk.Entry ( max = 32 )
        self.new_prof_password_e.connect ( "insert_text", self.cb_password_insert_handler )
        self.new_prof_email_e = gtk.Entry ( max = 256 )
        self.new_prof_email_e.connect ( "insert_text", self.cb_email_insert_handler )
        self.new_prof_shortname_e.show()
        self.new_prof_longname_e.show()
        self.new_prof_password_e.show()
        self.new_prof_email_e.show()
        # lay out label/entry pairs in a 2-column grid
        self.new_prof_table_l.attach ( self.new_prof_shortname_l, 0, 1, 0, 1 )
        self.new_prof_table_l.attach ( self.new_prof_shortname_e, 1, 2, 0, 1 )
        self.new_prof_table_l.attach ( self.new_prof_longname_l, 0, 1, 1, 2 )
        self.new_prof_table_l.attach ( self.new_prof_longname_e, 1, 2, 1, 2 )
        self.new_prof_table_l.attach ( self.new_prof_password_l, 0, 1, 2, 3 )
        self.new_prof_table_l.attach ( self.new_prof_password_e, 1, 2, 2, 3 )
        self.new_prof_table_l.attach ( self.new_prof_email_l, 0, 1, 3, 4 )
        self.new_prof_table_l.attach ( self.new_prof_email_e, 1, 2, 3, 4 )
        self.new_prof_table_l.set_col_spacings ( 10 )
        self.new_prof_table_l.set_row_spacings ( 10 )
        self.new_prof_table_l.show()
        self.new_prof_ok_b.show()
        self.new_prof_cancel_b.show()
        self.new_prof_d.vbox.pack_start ( self.new_prof_table_l, False, False, 10 )
        self.new_prof_d.action_area.pack_start ( self.new_prof_ok_b, False, False, 0 )
        self.new_prof_d.action_area.pack_start ( self.new_prof_cancel_b, False, False, 0 )
        self.new_prof_d.show()
        # edit? -> pre-populate from the existing profile file
        if edit:
            pridfile = self.fetch_pridfile()
            prid = pridfile [ 'prid' ]
            self.new_prof_shortname_e.set_text ( pridfile [ 'shortname' ] )
            self.new_prof_longname_e.set_text ( pridfile [ 'longname' ] )
            self.new_prof_password_e.set_text ( pridfile [ 'password' ] )
            self.new_prof_email_e.set_text ( pridfile [ 'email' ] )
        # run it..
        # (cb_new_profile_ok responds True only after validating all fields)
        r = self.new_prof_d.run()
        if r == True:
            pridfile = dict()
            if edit:
                pridfile [ 'prid' ] = prid
            else:
                pridfile [ 'prid' ] = str( uuid.uuid4() )
            pridfile [ 'shortname' ] = self.new_prof_shortname_e.get_text().upper()
            pridfile [ 'longname' ] = self.new_prof_longname_e.get_text()
            pridfile [ 'password' ] = self.new_prof_password_e.get_text()
            pridfile [ 'email' ] = self.new_prof_email_e.get_text()
            self.write_pridfile ( pridfile )
            self.push_profile ( pridfile )
            self.update_grayed_out()
        self.new_prof_d.hide()
        self.new_prof_d.destroy()

    def cb_password_insert_handler ( self, entry, text, length, position ):
        """insert_text filter: allow only ASCII letters and digits."""
        position = entry.get_position() # Because the method parameter 'position' is useless
        # Build a new string with allowed characters only.
        result = ''.join([c for c in text if c in string.ascii_letters+string.digits ])
        # The above line could also be written like so (more readable but less efficient):
        #   result = ''
        #   for c in text:
        #       if c in string.hexdigits:
        #           result += c
        if result != '':
            # Insert the new text at cursor (and block the handler to avoid recursion).
            entry.handler_block_by_func ( self.cb_password_insert_handler )
            entry.insert_text ( result, position )
            entry.handler_unblock_by_func ( self.cb_password_insert_handler )
            # Set the new cursor position immediately after the inserted text.
            new_pos = position + len ( result )
            # Can't modify the cursor position from within this handler,
            # so we add it to be done at the end of the main loop:
            gobject.gobject.idle_add ( entry.set_position, new_pos )
        # We handled the signal so stop it from being processed further.
        entry.stop_emission("insert_text")

    def cb_email_insert_handler ( self, entry, text, length, position ):
        """insert_text filter: allow letters, digits and punctuation (for emails)."""
        position = entry.get_position() # Because the method parameter 'position' is useless
        # Build a new string with allowed characters only.
        result = ''.join([c for c in text if c in string.ascii_letters+string.digits+string.punctuation ])
        # The above line could also be written like so (more readable but less efficient):
        #   result = ''
        #   for c in text:
        #       if c in string.hexdigits:
        #           result += c
        if result != '':
            # Insert the new text at cursor (and block the handler to avoid recursion).
            entry.handler_block_by_func ( self.cb_email_insert_handler )
            entry.insert_text ( result, position )
            entry.handler_unblock_by_func ( self.cb_email_insert_handler )
            # Set the new cursor position immediately after the inserted text.
            new_pos = position + len ( result )
            # Can't modify the cursor position from within this handler,
            # so we add it to be done at the end of the main loop:
            gobject.gobject.idle_add ( entry.set_position, new_pos )
        # We handled the signal so stop it from being processed further.
        entry.stop_emission("insert_text")

    def is_entry_valid ( self, t ):
        """Return False for an empty field value, True otherwise."""
        if len ( t ) == 0:
            return False
        return True

    def cb_new_profile_ok ( self, widget, data=None ):
        """OK handler: respond True only if every profile field is non-empty."""
        t = self.new_prof_shortname_e.get_text().upper()
        if not self.is_entry_valid ( t ):
            return
        t = self.new_prof_longname_e.get_text()
        if not self.is_entry_valid ( t ):
            return
        t = self.new_prof_password_e.get_text()
        if not self.is_entry_valid ( t ):
            return
        t = self.new_prof_email_e.get_text()
        if not self.is_entry_valid ( t ):
            return
        self.new_prof_d.response ( True )

    def cb_new_profile_cancel ( self, widget, data=None ):
        """Cancel handler: dismiss the profile dialog with a False response."""
        self.new_prof_d.response ( False )

    def cb_edit_profile ( self, widget, data=None ):
        """Open the profile dialog in edit mode."""
        self.cb_newedit_profile ( widget, data, edit = True )

    def cb_del_profile ( self, widget, data=None ):
        """Confirm, then delete the profile server-side and locally."""
        dialog = gtk.MessageDialog ( self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_YES_NO,
                                     "Delete current profile? (may not recovered!)" )
        r = dialog.run()
        dialog.destroy()
        if r == gtk.RESPONSE_YES:
            if self.push_delete_profile ( self.fetch_pridfile() ):
                os.unlink ( PRIDFILENAME )
                self.update_grayed_out()

    def cb_play_game ( self, widget, data=None ):
        """Launch the selected game, or nag the user to create a profile first."""
        if self.is_profile_exist():
            self.invoke_emu ( self.selected_gamename )
        else:
            md = gtk.MessageDialog ( self.window, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR,
                                     gtk.BUTTONS_CLOSE, "Please create a profile first" )
            md.run()
            md.destroy()

    def cb_set_banner ( self, text ):
        """Replace the banner label's markup with *text*."""
        self.banner_l.set_markup ( text )

    def cb_set_gamelist ( self, gamelist ):
        """Create one 'Select <game>' button per arcade entry in *gamelist*."""
        # build sorted gamelist (reversed because buttons are packed from the end)
        names = reversed ( sorted ( gamelist, key = lambda ge: ge [ 'longname' ].lower() ) )
        for gent in names:
            if gent [ 'field' ] != 'arcade':
                continue
            b = gtk.Button ( "Select " + gent [ 'longname' ] )
            b.connect ( "clicked", self.cb_clicked_game, gent [ 'gamename' ] )
            self.left_vb.pack_end ( b, False, False, 0 )
            """
            if self.is_profile_exist():
                pass
            else:
                b.set_sensitive ( False )
            """
            b.show()
            self.gamebuttons.append ( b )

    def cb_clicked_game ( self, but, v ):
        """Select game *v*, refresh sensitivity, and show its top scores in the banner."""
        #print "Desire to start game", v
        self.selected_gamename = v
        self.update_grayed_out()
        b = self.pull_highscore_with_ui ( v )
        j = json.loads ( b )
        text = 'Highest this month (full list available at c4a.openpandora.org)\n'
        text += "\n"
        runlen = 1
        last = None
        if j [ 'hi' ] == 0:
            text += "No scores registered yet (or server unavailable.)"
            self.cb_set_banner ( text )
            return
        for ent in j [ 'scoreboard' ]:
            # collapse consecutive entries by the same player so the board
            # shows distinct players; stop after 6 rows
            # NOTE(review): assumes the server returns the board ordered by
            # score -- confirm against the server side.
            if last == ent [ 'longname' ]:
                pass
            else:
                #text += str ( runlen ).ljust ( 5 )
                #text += "\t"
                text += ent [ 'shortname' ].ljust ( 5 )
                text += "\t"
                text += ent [ 'longname' ].ljust ( 30 )
                text += "\t"
                text += str ( ent [ 'score' ] )
                text += "\n"
                runlen += 1
                if runlen > 6:
                    break
            last = ent [ 'longname' ]
        self.cb_set_banner ( text )

    def invoke_emu ( self, gamename ):
        """Pull scores, run the configured emulator for *gamename*, push scores back."""
        # sync scores - pull
        # we pull them all, since they can switch games in-emu..
        self.sync_gamelist_with_ui ( push = False, current = gamename ) # pull
        # run the emu (command template from [Exec] mamebase in the config)
        emubase = config.get ( 'Exec', 'mamebase' )
        emu = emubase % { "gamename": gamename }
        print "REM: Invoking '%s'" % ( emu )
        subprocess.call ( emu, shell=True )
        # sync scores - push
        self.sync_gamelist_with_ui ( push = True, current = gamename ) # push

    def update_grayed_out ( self ):
        """Enable/disable the profile and Play buttons to match current state."""
        if self.is_profile_exist():
            self.new_profile_b.set_sensitive ( False )
        else:
            self.new_profile_b.set_sensitive ( True )
        if self.is_profile_exist():
            self.edit_profile_b.set_sensitive ( True )
        else:
            self.edit_profile_b.set_sensitive ( False )
        if self.is_profile_exist():
            self.del_profile_b.set_sensitive ( True )
        else:
            self.del_profile_b.set_sensitive ( False )
        if self.selected_gamename == None:
            self.play_b.set_sensitive ( False )
        else:
            self.play_b.set_sensitive ( True )

    def delete_event ( self, widget, event, data=None ):
        """Window delete handler."""
        # return False -> GTK will ask for destroy
        # return True -> GTK will not ask to destroy (ask "you're sure?"?)
        #print "User asks for delete widget"
        return False # kill me

    def destroy ( self, widget, data=None ):
        """Window destroy handler: leave the GTK main loop."""
        #print "destroy signal occurred"
        gtk.main_quit() # exeunt

    def __init__(self):
        """Build the whole UI, contact the server, and populate initial state."""
        # create a new window
        self.window = gtk.Window ( gtk.WINDOW_TOPLEVEL )
        self.window.set_default_size ( config.getint ( 'Display', 'width' ), config.getint ( 'Display', 'height' ) )
        self.window.set_position ( gtk.WIN_POS_CENTER )
        if config.getint ( 'Display', 'fullscreen' ) > 0:
            self.window.fullscreen()
        self.window.set_title ( "Compo4All" )
        self.window.set_border_width ( 10 )
        # state
        self.selected_gamename = None
        # handlers
        self.window.connect ( "delete_event", self.delete_event )
        self.window.connect ( "destroy", self.destroy )
        # widgets: scrollable button column on the left, banner/log on the right
        self.outer_hb = gtk.HBox ( False, 0 )
        self.window.add ( self.outer_hb )
        self.left_vb_s = gtk.ScrolledWindow()
        self.left_vb_s.set_policy ( gtk.POLICY_NEVER, gtk.POLICY_ALWAYS )
        self.left_vb_s.show()
        self.left_vb = gtk.VBox ( False, 0 )
        self.right_vb = gtk.VBox ( False, 0 )
        self.outer_hb.pack_start ( self.left_vb_s, True ) # False for thin column
        self.left_vb_s.add_with_viewport ( self.left_vb )
        self.outer_hb.pack_start ( self.right_vb )
        image = gtk.Image()
        image.set_from_file ( config.get ( 'Display', 'banner_image' ) )
        image.show()
        self.right_vb.pack_start ( image )
        # Buttons
        self.new_profile_b = gtk.Button ( "Create new profile" )
        self.new_profile_b.connect ( "clicked", self.cb_newedit_profile, None )
        self.edit_profile_b = gtk.Button ( "Edit existing profile" )
        self.edit_profile_b.connect ( "clicked", self.cb_edit_profile, None )
        self.del_profile_b = gtk.Button ( "Delete existing profile" )
        self.del_profile_b.connect ( "clicked", self.cb_del_profile, None )
        self.quit_b = gtk.Button ( "Quit C4A" )
        self.quit_b.connect_object ( "clicked", gtk.Widget.destroy, self.window )
        self.play_b = gtk.Button ( "Play" )
        self.play_b.connect_object ( "clicked", self.cb_play_game, None )
        self.gamebuttons = list()
        # This packs the button into the window (a GTK container).
        self.left_vb.pack_start ( self.new_profile_b, False, False, 0 )
        self.left_vb.pack_start ( self.edit_profile_b, False, False, 0 )
        self.left_vb.pack_start ( self.del_profile_b, False, False, 0 )
        s = gtk.HSeparator(); s.show(); self.left_vb.add ( s )
        self.left_vb.pack_start ( self.quit_b, False, False, 0 )
        s = gtk.HSeparator(); s.show(); self.left_vb.add ( s )
        # banner label (server banner / high-score board)
        self.banner_l = gtk.Label()
        self.banner_l.set_markup ( "Banner: <i>Waiting for server...</i>" )
        self.banner_l.set_line_wrap ( True )
        self.banner_l.set_alignment ( xalign=0.22, yalign=0.5 )
        self.banner_l.show()
        self.right_vb.pack_start ( self.banner_l )
        # transient progress/log label (see append_log/finish_log)
        self.log_l = gtk.Label()
        self.log_l.set_markup ( "Waiting for server..." )
        self.log_l.set_line_wrap ( True )
        self.log_l.set_alignment ( xalign=0.0, yalign=0.0 )
        self.log_l.show()
        self.right_vb.pack_end ( self.log_l, True, True )
        self.right_vb.pack_end ( self.play_b )
        # The final step is to display this newly created widget.
        self.new_profile_b.show()
        self.edit_profile_b.show()
        self.del_profile_b.show()
        self.quit_b.show()
        self.play_b.show()
        # and the window
        self.left_vb.show()
        self.right_vb.show()
        self.outer_hb.show()
        self.window.show()
        # data pull notification..
        if not self.is_server_available():
            md = gtk.MessageDialog ( self.window, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR,
                                     gtk.BUTTONS_CLOSE, "Error contacting server!")
            md.run()
            md.destroy()
            sys.exit ( -1 )
        self.pull_banner_and_update_with_ui()
        self.pull_gamelist_and_update_with_ui()
        if self.is_profile_exist():
            #self.pull_profile_and_update_with_ui()
            pass
        else:
            self.del_profile_b.set_sensitive ( False )
        self.update_grayed_out()

    def is_server_available ( self ):
        """Ping the server's 'ohai' endpoint; return 1 if reachable, else 0."""
        self.append_log ( "Checking connectivity .." )
        try:
            b = subprocess.check_output ( config.get ( 'Sources', 'ohai' ), stderr=subprocess.STDOUT, shell=True )
            j = json.loads ( b )
            if j [ 'status' ] == 'OK':
                self.finish_log()
                return 1
            print "Bad status from server OHAI"
        except:
            print "avail: Unexpected error:", sys.exc_info()[0]
        self.finish_log()
        return 0

    def pull_highscore_with_ui ( self, gamename ):
        """Fetch the raw high-score JSON for *gamename* (shell command, blocking)."""
        self.append_log ( "Fetching scores from the server .." )
        url = config.get ( 'Sources', 'highscore_base' ) + gamename + "/"
        b = subprocess.check_output ( url, stderr=subprocess.STDOUT, shell=True )
        self.finish_log()
        return b

    def pull_banner_and_update_with_ui ( self ):
        """Fetch the server banner and display it."""
        # blast, python + thread + urllib/httplib2/etc are fubar, skip for now
        # http://zetcode.com/gui/pygtk/dialogs/
        self.append_log ( "Fetching banner from the server .." )
        b = subprocess.check_output ( config.get ( 'Sources', 'banner' ), stderr=subprocess.STDOUT, shell=True )
        j = json.loads ( b )
        self.cb_set_banner ( j [ 'banner' ] )
        self.finish_log()

    def sync_gamelist_with_ui ( self, push = True, current = None ):
        """Sync high-score files via the spaghetti helper.

        push=True uploads any .hi file modified today; push=False pulls the
        score file for *current* and removes the rest (emu regenerates them).
        """
        scpath = config.get ( 'Exec', 'spaghetti' )
        self.append_log ( "Syncing scores for current season .." )
        if push: # push
            for gn in self.gamelist:
                if gn [ 'field' ] != 'arcade':
                    continue
                # strategy options: see below...
                # - but in essence, let us look for modified 'recently' (today?) and existance
                # --> if file _exists_, and if modified recently, sync it
                # check existance
                scorepath = './hi/' + gn [ 'gamename' ] + '.hi'
                if os.path.exists ( scorepath ):
                    # check today-ness
                    timestamp = os.path.getmtime ( scorepath )
                    if datetime.date.fromtimestamp ( timestamp ) == datetime.date.today():
                        scrun = scpath + " push -d " + gn [ 'gamename' ]
                        self.append_log ( "Syncing scores for current season .. " + gn [ 'gamename' ] )
                        try:
                            print "sync push: scrun", scrun
                            subprocess.call ( scrun, shell = True )
                        except:
                            print "sync push: Unexpected error:", sys.exc_info()
                            print scrun
        else: # pull
            for gn in self.gamelist:
                if gn [ 'field' ] != 'arcade':
                    continue
                # strategy options..
                # - pull them all (in case of game switching in the emu, and to get fresh default scores from server)
                # - pull only the one (ignore game switching case, runs fast, and get from server)
                # - rm them all (let emu generate fresh scores of its own, no server pull at all); we basicly trust the user
                #   all the time anyway (cheating avoidance etc), so this is not so bad..
                # - combined: rm them all, but pull the target game from server
                self.append_log ( "Pulling scores for current season .. " + gn [ 'gamename' ] )
                if gn [ 'gamename' ] == current:
                    scrun = scpath + " pull " + gn [ 'gamename' ]
                    try:
                        print "sync pull: scrun", scrun
                        subprocess.call ( scrun, shell = True )
                    except:
                        print "sync pull: Unexpected error:", sys.exc_info()
                        print scrun
                else:
                    try:
                        print "sync pull: rm", gn [ 'gamename' ]
                        os.remove ( './hi/' + gn [ 'gamename' ] + '.hi' )
                    except:
                        pass
        self.finish_log()

    def pull_gamelist_and_update_with_ui ( self ):
        """Fetch the current game list, build buttons, and cache it on self.gamelist."""
        # blast, python + thread + urllib/httplib2/etc are fubar, skip for now
        # http://zetcode.com/gui/pygtk/dialogs/
        self.append_log ( "Fetching current game list from the server .." )
        b = subprocess.check_output ( config.get ( 'Sources', 'curgamelist' ), stderr=subprocess.STDOUT, shell=True )
        j = json.loads ( b )
        self.cb_set_gamelist ( j [ 'gamelist' ] )
        self.gamelist = j [ 'gamelist' ]
        self.finish_log()

    def is_profile_exist ( self ):
        """True iff the local profile file exists."""
        if os.path.isfile ( PRIDFILENAME ):
            return True
        return False

    def fetch_pridfile ( self ):
        """Parse the local profile file (skipping the leading prid line);
        return the profile dict, or None on any failure."""
        try:
            f = open ( PRIDFILENAME, "r" )
            f.readline() # pull off leading prid
            pridfile = f.read()
            j = json.loads ( pridfile )
            f.close()
            return j
        except:
            return None

    def write_pridfile ( self, d ):
        """Write profile dict *d* to disk: prid on line 1, JSON body after."""
        f = open ( PRIDFILENAME, "w" )
        f.write ( d [ 'prid' ] + "\n" )
        f.write ( json.dumps ( d ) )
        f.close()

    def push_delete_profile ( self, d ):
        """Upload a delete-profile request for *d* via curl; always returns True."""
        curlpath = config.get ( 'Exec', 'curl' )
        url = config.get ( 'Sources', 'delprofile' )
        j = json.dumps ( d )
        # curl -T needs a file; use a temp file that outlives the handle
        f = tempfile.NamedTemporaryFile ( delete = False )
        f.write ( j )
        f.close()
        curlrun = curlpath + " -T " + f.name + " " + url
        subprocess.call ( curlrun, shell = True )
        os.unlink ( f.name )
        return True

    def push_profile ( self, d ):
        """Upload profile dict *d* to the server via curl; always returns True."""
        curlpath = config.get ( 'Exec', 'curl' )
        url = config.get ( 'Sources', 'setprofile' )
        j = json.dumps ( d )
        #opener = urllib2.build_opener(urllib2.HTTPHandler)
        #request = urllib2.Request ( url, data=j )
        #request.add_header ( 'Content-Type', 'text/json' )
        #request.get_method = lambda: 'PUT'
        #url = opener.open(request)
        # curl -T needs a file; use a temp file that outlives the handle
        f = tempfile.NamedTemporaryFile ( delete = False )
        f.write ( j )
        f.close()
        #curlrun = curlpath + " -T - " + url
        #curlrun = curlpath + " -T ../Makefile " + url
        curlrun = curlpath + " -T " + f.name + " " + url
        #p = subprocess.Popen ( [ '/usr/bin/curl', '-T', '-', url ], stdin=subprocess.PIPE )
        #p.communicate ( input=j )
        subprocess.call ( curlrun, shell = True )
        os.unlink ( f.name )
        return True

    def pull_profile_and_update_with_ui ( self ):
        """Fetch this player's profile from the server (result currently unused)."""
        prid = self.fetch_pridfile()
        if not prid:
            return
        self.append_log ( "Fetching profile from the server .." )
        url = config.get ( 'Sources', 'getprofile_base' ) + prid [ 'prid' ]
        b = subprocess.check_output ( url, stderr=subprocess.STDOUT, shell=True )
        j = json.loads ( b )
        #self.cb_set_banner ( j [ 'banner' ] )
        self.finish_log()

    def append_log ( self, message ):
        """Show *message* in the log label and pump GTK so it paints immediately."""
        self.log_l.show()
        self.log_l.set_markup ( message )
        while gtk.events_pending():
            gtk.main_iteration()

    def finish_log ( self ):
        """Clear and hide the log label."""
        self.log_l.set_markup ( "" )
        self.log_l.hide()

    def main ( self ):
        """Enter the GTK main loop (blocks until quit)."""
        gtk.main()
if __name__ == "__main__":
    # Build the front-end and hand control to the GTK main loop.
    Frontend().main()
|
skeezix/compo4all
|
spaghetti-launcher/c4a-fe.py
|
Python
|
gpl-2.0
| 25,486
|
[
"BLAST"
] |
5692a73558066498104be9607c6eac47946be655d9890b9fe8d27e2a0c71a2b8
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import json
import os
import shlex
import zipfile
import re
import pkgutil
from ast import AST, Import, ImportFrom
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, _nested_dict_get
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
from collections import namedtuple
# Prefer the py3 importlib machinery; on py2 fall back to the deprecated
# 'imp' module.  'imp' doubles as the py2/py3 sentinel elsewhere in this
# file (imp is None => use importlib).
try:
    import importlib.util
    import importlib.machinery
    imp = None
except ImportError:
    import imp

# if we're on a Python that doesn't have FNFError, redefine it as IOError (since that's what we'll see)
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError

display = Display()

# Work item describing one module_utils import to resolve:
# (name_parts, is_ambiguous, has_redirected_child, is_optional)
ModuleUtilsProcessEntry = namedtuple('ModuleUtilsInfo', ['name_parts', 'is_ambiguous', 'has_redirected_child', 'is_optional'])

# Sentinel byte strings replaced when assembling an (old-style) module payload.
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"

# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'

# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module.py script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
import os
import os.path
# Access to the working directory is required by Python when using pipelining, as well as for the coverage module.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
try:
os.chdir(os.path.expanduser('~'))
except OSError:
os.chdir('/')
%(rlimit)s
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
# Strip cwd from sys.path to avoid potential permissions issues
excludes = set(('', '.', scriptdir))
sys.path = [p for p in sys.path if p not in excludes]
import base64
import runpy
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
PY3 = False
else:
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name='%(module_fqn)s', init_globals=dict(_module_fqn='%(module_fqn)s', _modlib_path=modlib_path),
run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
try:
import coverage
except ImportError:
print('{"msg": "Could not import `coverage` module.", "failed": true}')
sys.exit(1)
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = '''
try:
if PY3:
import importlib.util
if importlib.util.find_spec('coverage') is None:
raise ImportError
else:
import imp
imp.find_module('coverage')
except ImportError:
print('{"msg": "Could not find `coverage` module.", "failed": true}')
sys.exit(1)
'''
ANSIBALLZ_RLIMIT_TEMPLATE = '''
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
if C.DEFAULT_KEEP_REMOTE_FILES:
    # Keep comments when KEEP_REMOTE_FILES is set.  That way users will see
    # the comments with some nice usage instructions
    ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
    # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
    ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)

# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py) == site-packages
# Do this instead of getting site-packages from distutils.sysconfig so we work when we
# haven't been installed
site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.(py|ps1)$' % re.escape(site_packages))
COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$')

# Detect new-style Python modules by looking for required imports:
# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util]
# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util]
# import ansible.module_utils[.basic]
# from ansible.module_utils[ import basic]
# from ansible.module_utils[.basic import AnsibleModule]
# from ..module_utils[ import basic]
# from ..module_utils[.basic import AnsibleModule]
NEW_STYLE_PYTHON_MODULE_RE = re.compile(
    # Relative imports
    br'(?:from +\.{2,} *module_utils.* +import |'
    # Collection absolute imports:
    br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |'
    br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|'
    # Core absolute imports
    br'from +ansible\.module_utils.* +import |'
    br'import +ansible\.module_utils\.)'
)
class ModuleDepFinder(ast.NodeVisitor):
    # AST walker that records which ansible.module_utils / ansible_collections
    # module_utils a module imports, so the payload builder knows what to ship.

    def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs):
        """
        Walk the ast tree for the python module.
        :arg module_fqn: The fully qualified name to reach this module in dotted notation.
            example: ansible.module_utils.basic
        :arg is_pkg_init: Inform the finder it's looking at a package init (eg __init__.py) to allow
            relative import expansion to use the proper package level without having imported it locally first.

        Save submodule[.submoduleN][.identifier] into self.submodules
        when they are from ansible.module_utils or ansible_collections packages

        self.submodules will end up with tuples like:
            - ('ansible', 'module_utils', 'basic',)
            - ('ansible', 'module_utils', 'urls', 'fetch_url')
            - ('ansible', 'module_utils', 'database', 'postgres')
            - ('ansible', 'module_utils', 'database', 'postgres', 'quote')
            - ('ansible', 'module_utils', 'database', 'postgres', 'quote')
            - ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo')

        It's up to calling code to determine whether the final element of the
        tuple are module names or something else (function, class, or variable names)

        .. seealso:: :python3:class:`ast.NodeVisitor`
        """
        super(ModuleDepFinder, self).__init__(*args, **kwargs)
        self._tree = tree  # squirrel this away so we can compare node parents to it
        self.submodules = set()
        self.optional_imports = set()
        self.module_fqn = module_fqn
        self.is_pkg_init = is_pkg_init

        # dispatch table used by generic_visit below to call import visitors
        # directly, skipping ast.NodeVisitor's slower visit() lookup
        self._visit_map = {
            Import: self.visit_Import,
            ImportFrom: self.visit_ImportFrom,
        }

        self.visit(tree)

    def generic_visit(self, node):
        """Overridden ``generic_visit`` that makes some assumptions about our
        use case, and improves performance by calling visitors directly instead
        of calling ``visit`` to offload calling visitors.
        """
        generic_visit = self.generic_visit
        visit_map = self._visit_map
        for field, value in ast.iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, (Import, ImportFrom)):
                        # remember the parent so visitors can tell top-level
                        # (required) imports from nested (optional) ones
                        item.parent = node
                        visit_map[item.__class__](item)
                    elif isinstance(item, AST):
                        generic_visit(item)

    visit = generic_visit

    def visit_Import(self, node):
        """
        Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]

        We save these as interesting submodules when the imported library is in ansible.module_utils
        or ansible.collections
        """
        for alias in node.names:
            if (alias.name.startswith('ansible.module_utils.') or
                    alias.name.startswith('ansible_collections.')):
                py_mod = tuple(alias.name.split('.'))
                self.submodules.add(py_mod)
                # if the import's parent is the root document, it's a required import, otherwise it's optional
                if node.parent != self._tree:
                    self.optional_imports.add(py_mod)
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        """
        Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname]

        Also has to handle relative imports

        We save these as interesting submodules when the imported library is in ansible.module_utils
        or ansible.collections
        """

        # FIXME: These should all get skipped:
        #        from ansible.executor import module_common
        #        from ...executor import module_common
        #        from ... import executor (Currently it gives a non-helpful error)
        if node.level > 0:
            # if we're in a package init, we have to add one to the node level (and make it none if 0 to preserve the right slicing behavior)
            level_slice_offset = -node.level + 1 or None if self.is_pkg_init else -node.level
            if self.module_fqn:
                parts = tuple(self.module_fqn.split('.'))
                if node.module:
                    # relative import: from .module import x
                    node_module = '.'.join(parts[:level_slice_offset] + (node.module,))
                else:
                    # relative import: from . import x
                    node_module = '.'.join(parts[:level_slice_offset])
            else:
                # fall back to an absolute import
                node_module = node.module
        else:
            # absolute import: from module import x
            node_module = node.module

        # Specialcase: six is a special case because of its
        # import logic
        py_mod = None
        if node.names[0].name == '_six':
            self.submodules.add(('_six',))
        elif node_module.startswith('ansible.module_utils'):
            # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
            # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
            # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
            # from ansible.module_utils import MODULE1 [,MODULEn] [as asname]
            py_mod = tuple(node_module.split('.'))

        elif node_module.startswith('ansible_collections.'):
            if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module:
                # from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
                # from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
                # FIXME: Unhandled cornercase (needs to be ignored):
                # from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER
                py_mod = tuple(node_module.split('.'))
            else:
                # Not from module_utils so ignore.  for instance:
                # from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
                pass

        if py_mod:
            for alias in node.names:
                self.submodules.add(py_mod + (alias.name,))
                # if the import's parent is the root document, it's a required import, otherwise it's optional
                if node.parent != self._tree:
                    self.optional_imports.add(py_mod + (alias.name,))

        self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
with open(path, 'rb') as fd:
data = fd.read()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=False):
    """
    Note not stellar API:
       Returns None instead of always returning a shebang line.  Doing it this
       way allows the caller to decide to use the shebang it read from the
       file rather than trust that we reformatted what they already have
       correctly.

    Returns a (shebang, interpreter_out) tuple; shebang is None when the
    resolved interpreter is unchanged from *interpreter*.
    """
    # FUTURE: add logical equivalence for python3 in the case of py3-only modules

    interpreter_name = os.path.basename(interpreter).strip()

    # name for interpreter var
    interpreter_config = u'ansible_%s_interpreter' % interpreter_name
    # key for config
    interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()

    interpreter_out = None

    # looking for python, rest rely on matching vars
    if interpreter_name == 'python':
        # skip detection for network os execution, use playbook supplied one if possible
        if remote_is_local:
            interpreter_out = task_vars['ansible_playbook_python']

        # a config def exists for this interpreter type; consult config for the value
        elif C.config.get_configuration_definition(interpreter_config_key):

            interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars)
            interpreter_out = templar.template(interpreter_from_config.strip())

            # handle interpreter discovery if requested or empty interpreter was provided
            if not interpreter_out or interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:

                discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
                facts_from_task_vars = task_vars.get('ansible_facts', {})

                if discovered_interpreter_config not in facts_from_task_vars:
                    # interpreter discovery is desired, but has not been run for this host
                    raise InterpreterDiscoveryRequiredError("interpreter discovery needed", interpreter_name=interpreter_name, discovery_mode=interpreter_out)
                else:
                    interpreter_out = facts_from_task_vars[discovered_interpreter_config]
        else:
            raise InterpreterDiscoveryRequiredError("interpreter discovery required", interpreter_name=interpreter_name, discovery_mode='auto_legacy')

    elif interpreter_config in task_vars:
        # for non python we consult vars for a possible direct override
        interpreter_out = templar.template(task_vars.get(interpreter_config).strip())

    if not interpreter_out:
        # nothing matched(None) or in case someone configures empty string or empty interpreter
        interpreter_out = interpreter
        shebang = None
    elif interpreter_out == interpreter:
        # no change, no new shebang
        shebang = None
    else:
        # set shebang cause we changed interpreter
        shebang = u'#!' + interpreter_out
        if args:
            shebang = shebang + u' ' + u' '.join(args)

    return shebang, interpreter_out
class ModuleUtilLocatorBase:
    # Base class for resolving one module_utils import: finds source on disk
    # (subclass-specific), applies collection routing redirects/tombstones,
    # and records the result in found/source_code/output_path/is_package.

    def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
        self._is_ambiguous = is_ambiguous
        # a child package redirection could cause intermediate package levels to be missing, eg
        # from ansible.module_utils.x.y.z import foo; if x.y.z.foo is redirected, we may not have packages on disk for
        # the intermediate packages x.y.z, so we'll need to supply empty packages for those
        self._child_is_redirected = child_is_redirected
        self._is_optional = is_optional
        self.found = False
        self.redirected = False
        self.fq_name_parts = fq_name_parts
        self.source_code = ''
        self.output_path = ''
        self.is_package = False
        self._collection_name = None
        # for ambiguous imports, we should only test for things more than one level below module_utils
        # this lets us detect erroneous imports and redirections earlier
        if is_ambiguous and len(self._get_module_utils_remainder_parts(fq_name_parts)) > 1:
            self.candidate_names = [fq_name_parts, fq_name_parts[:-1]]
        else:
            self.candidate_names = [fq_name_parts]

    @property
    def candidate_names_joined(self):
        # candidate names as dotted strings (for error/debug output)
        return ['.'.join(n) for n in self.candidate_names]

    def _handle_redirect(self, name_parts):
        # Consult the owning collection's routing metadata; returns True when a
        # redirect was applied (source_code becomes a generated shim module).
        module_utils_relative_parts = self._get_module_utils_remainder_parts(name_parts)

        # only allow redirects from below module_utils- if above that, bail out (eg, parent package names)
        if not module_utils_relative_parts:
            return False

        try:
            collection_metadata = _get_collection_metadata(self._collection_name)
        except ValueError as ve:  # collection not found or some other error related to collection load
            if self._is_optional:
                return False
            raise AnsibleError('error processing module_util {0} loading redirected collection {1}: {2}'
                               .format('.'.join(name_parts), self._collection_name, to_native(ve)))

        routing_entry = _nested_dict_get(collection_metadata, ['plugin_routing', 'module_utils', '.'.join(module_utils_relative_parts)])
        if not routing_entry:
            return False
        # FIXME: add deprecation warning support

        dep_or_ts = routing_entry.get('tombstone')
        removed = dep_or_ts is not None
        if not removed:
            dep_or_ts = routing_entry.get('deprecation')

        if dep_or_ts:
            removal_date = dep_or_ts.get('removal_date')
            removal_version = dep_or_ts.get('removal_version')
            warning_text = dep_or_ts.get('warning_text')

            msg = 'module_util {0} has been removed'.format('.'.join(name_parts))
            if warning_text:
                msg += ' ({0})'.format(warning_text)
            else:
                msg += '.'

            display.deprecated(msg, removal_version, removed, removal_date, self._collection_name)
        if 'redirect' in routing_entry:
            self.redirected = True
            source_pkg = '.'.join(name_parts)
            self.is_package = True  # treat all redirects as packages
            redirect_target_pkg = routing_entry['redirect']

            # expand FQCN redirects
            if not redirect_target_pkg.startswith('ansible_collections'):
                split_fqcn = redirect_target_pkg.split('.')
                if len(split_fqcn) < 3:
                    raise Exception('invalid redirect for {0}: {1}'.format(source_pkg, redirect_target_pkg))
                # assume it's an FQCN, expand it
                redirect_target_pkg = 'ansible_collections.{0}.{1}.plugins.module_utils.{2}'.format(
                    split_fqcn[0],  # ns
                    split_fqcn[1],  # coll
                    '.'.join(split_fqcn[2:])  # sub-module_utils remainder
                )
            display.vvv('redirecting module_util {0} to {1}'.format(source_pkg, redirect_target_pkg))
            self.source_code = self._generate_redirect_shim_source(source_pkg, redirect_target_pkg)
            return True
        return False

    def _get_module_utils_remainder_parts(self, name_parts):
        # subclasses should override to return the name parts after module_utils
        return []

    def _get_module_utils_remainder(self, name_parts):
        # return the remainder parts as a package string
        return '.'.join(self._get_module_utils_remainder_parts(name_parts))

    def _find_module(self, name_parts):
        # subclasses override: locate the module source, return True on success
        return False

    def _locate(self, redirect_first=True):
        # Try each candidate name, consulting routing redirects before or
        # after the on-disk search depending on redirect_first.
        for candidate_name_parts in self.candidate_names:
            if redirect_first and self._handle_redirect(candidate_name_parts):
                break

            if self._find_module(candidate_name_parts):
                break

            if not redirect_first and self._handle_redirect(candidate_name_parts):
                break

        else:  # didn't find what we were looking for- last chance for packages whose parents were redirected
            if self._child_is_redirected:  # make fake packages
                self.is_package = True
                self.source_code = ''
            else:  # nope, just bail
                return

        if self.is_package:
            path_parts = candidate_name_parts + ('__init__',)
        else:
            path_parts = candidate_name_parts
        self.found = True
        self.output_path = os.path.join(*path_parts) + '.py'
        self.fq_name_parts = candidate_name_parts

    def _generate_redirect_shim_source(self, fq_source_module, fq_target_module):
        # tiny module body that aliases the redirect target under the old name
        return """
import sys
import {1} as mod
sys.modules['{0}'] = mod
""".format(fq_source_module, fq_target_module)

    # FIXME: add __repr__ impl
class LegacyModuleUtilLocator(ModuleUtilLocatorBase):
    # Locator for core ansible.module_utils.* imports; searches the given
    # module_utils paths on disk (py3 importlib or py2 imp).

    def __init__(self, fq_name_parts, is_ambiguous=False, mu_paths=None, child_is_redirected=False):
        super(LegacyModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected)

        if fq_name_parts[0:2] != ('ansible', 'module_utils'):
            raise Exception('this class can only locate from ansible.module_utils, got {0}'.format(fq_name_parts))

        if fq_name_parts[2] == 'six':
            # FIXME: handle the ansible.module_utils.six._six case with a redirect or an internal _six attr on six itself?
            # six creates its submodules at runtime; convert all these to just 'ansible.module_utils.six'
            fq_name_parts = ('ansible', 'module_utils', 'six')
            self.candidate_names = [fq_name_parts]

        self._mu_paths = mu_paths
        self._collection_name = 'ansible.builtin'  # legacy module utils always look in ansible.builtin for redirects
        self._locate(redirect_first=False)  # let local stuff override redirects for legacy

    def _get_module_utils_remainder_parts(self, name_parts):
        return name_parts[2:]  # eg, foo.bar for ansible.module_utils.foo.bar

    def _find_module(self, name_parts):
        rel_name_parts = self._get_module_utils_remainder_parts(name_parts)

        # no redirection; try to find the module
        if len(rel_name_parts) == 1:  # direct child of module_utils, just search the top-level dirs we were given
            paths = self._mu_paths
        else:  # a nested submodule of module_utils, extend the paths given with the intermediate package names
            paths = [os.path.join(p, *rel_name_parts[:-1]) for p in
                     self._mu_paths]  # extend the MU paths with the relative bit

        if imp is None:  # python3 find module
            # find_spec needs the full module name
            self._info = info = importlib.machinery.PathFinder.find_spec('.'.join(name_parts), paths)
            if info is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
                self.is_package = info.origin.endswith('/__init__.py')
                path = info.origin
            else:
                return False
            self.source_code = _slurp(path)
        else:  # python2 find module
            try:
                # imp just wants the leaf module/package name being searched for
                info = imp.find_module(name_parts[-1], paths)
            except ImportError:
                return False

            if info[2][2] == imp.PY_SOURCE:
                fd = info[0]
            elif info[2][2] == imp.PKG_DIRECTORY:
                self.is_package = True
                fd = open(os.path.join(info[1], '__init__.py'))
            else:
                return False

            try:
                self.source_code = fd.read()
            finally:
                fd.close()

        return True
class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
    # Locator for collection module_utils
    # (ansible_collections.<ns>.<coll>.plugins.module_utils.*); fetches source
    # via pkgutil from the installed collection package.

    def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
        super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected, is_optional)

        if fq_name_parts[0] != 'ansible_collections':
            raise Exception('CollectionModuleUtilLocator can only locate from ansible_collections, got {0}'.format(fq_name_parts))
        elif len(fq_name_parts) >= 6 and fq_name_parts[3:5] != ('plugins', 'module_utils'):
            raise Exception('CollectionModuleUtilLocator can only locate below ansible_collections.(ns).(coll).plugins.module_utils, got {0}'
                            .format(fq_name_parts))

        self._collection_name = '.'.join(fq_name_parts[1:3])

        self._locate()

    def _find_module(self, name_parts):
        # synthesize empty inits for packages down through module_utils- we don't want to allow those to be shipped over, but the
        # package hierarchy needs to exist
        if len(name_parts) < 6:
            self.source_code = ''
            self.is_package = True
            return True

        # NB: we can't use pkgutil.get_data safely here, since we don't want to import/execute package/module code on
        # the controller while analyzing/assembling the module, so we'll have to manually import the collection's
        # Python package to locate it (import root collection, reassemble resource path beneath, fetch source)

        collection_pkg_name = '.'.join(name_parts[0:3])
        resource_base_path = os.path.join(*name_parts[3:])

        src = None
        # look for package_dir first, then module
        try:
            src = pkgutil.get_data(collection_pkg_name, to_native(os.path.join(resource_base_path, '__init__.py')))
        except ImportError:
            pass

        # TODO: we might want to synthesize fake inits for py3-style packages, for now they're required beneath module_utils

        if src is not None:  # empty string is OK
            self.is_package = True
        else:
            try:
                src = pkgutil.get_data(collection_pkg_name, to_native(resource_base_path + '.py'))
            except ImportError:
                pass

        if src is None:  # empty string is OK
            return False

        self.source_code = src
        return True

    def _get_module_utils_remainder_parts(self, name_parts):
        return name_parts[5:]  # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar
def recursive_finder(name, module_fqn, module_data, zf):
    """
    Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module and its module_utils files needs. (no longer actually recursive)
    :arg name: Name of the python module we're examining
    :arg module_fqn: Fully qualified name of the python module we're scanning
    :arg module_data: string Python code of the module we're scanning
    :arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
        which we're assembling
    :raises AnsibleError: if the module or any located module_util fails to parse,
        or if a required (non-optional) module_util import cannot be found
    """
    # py_module_cache maps python module names to a tuple of the code in the module
    # and the pathname to the module.
    # Here we pre-load it with modules which we create without bothering to
    # read from actual files (In some cases, these need to differ from what ansible
    # ships because they're namespace packages in the module)
    # FIXME: do we actually want ns pkg behavior for these? Seems like they should just be forced to emptyish pkg stubs
    py_module_cache = {
        ('ansible',): (
            b'from pkgutil import extend_path\n'
            b'__path__=extend_path(__path__,__name__)\n'
            b'__version__="' + to_bytes(__version__) +
            b'"\n__author__="' + to_bytes(__author__) + b'"\n',
            'ansible/__init__.py'),
        ('ansible', 'module_utils'): (
            b'from pkgutil import extend_path\n'
            b'__path__=extend_path(__path__,__name__)\n',
            'ansible/module_utils/__init__.py')}

    # directories to search for legacy (non-collection) module_utils
    module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
    module_utils_paths.append(_MODULE_UTILS_PATH)

    # Parse the module code and find the imports of ansible.module_utils
    try:
        tree = compile(module_data, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
    except (SyntaxError, IndentationError) as e:
        raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))

    finder = ModuleDepFinder(module_fqn, tree)

    # the format of this set is a tuple of the module name and whether or not the import is ambiguous as a module name
    # or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?)
    modules_to_process = [ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]

    # HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input
    modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False, is_optional=False))

    # we'll be adding new modules inline as we discover them, so just keep going til we've processed them all
    while modules_to_process:
        modules_to_process.sort()  # not strictly necessary, but nice to process things in predictable and repeatable order
        py_module_name, is_ambiguous, child_is_redirected, is_optional = modules_to_process.pop(0)

        if py_module_name in py_module_cache:
            # this is normal; we'll often see the same module imported many times, but we only need to process it once
            continue

        # dispatch to the correct locator based on where the import points
        if py_module_name[0:2] == ('ansible', 'module_utils'):
            module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
                                                  mu_paths=module_utils_paths, child_is_redirected=child_is_redirected)
        elif py_module_name[0] == 'ansible_collections':
            module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
                                                      child_is_redirected=child_is_redirected, is_optional=is_optional)
        else:
            # FIXME: dot-joined result
            display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
                            % [py_module_name])
            continue

        # Could not find the module. Construct a helpful error message.
        if not module_info.found:
            if is_optional:
                # this was a best-effort optional import that we couldn't find, oh well, move along...
                continue
            # FIXME: use dot-joined candidate names
            msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
            raise AnsibleError(msg)

        # check the cache one more time with the module we actually found, since the name could be different than the input
        # eg, imported name vs module
        if module_info.fq_name_parts in py_module_cache:
            continue

        # compile the source, process all relevant imported modules
        try:
            tree = compile(module_info.source_code, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
        except (SyntaxError, IndentationError) as e:
            raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg))

        finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package)
        # queue newly-discovered imports (skipping anything already in the cache)
        modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
                                  for m in finder.submodules if m not in py_module_cache)

        # we've processed this item, add it to the output list
        py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path)

        # ensure we process all ancestor package inits
        accumulated_pkg_name = []
        for pkg in module_info.fq_name_parts[:-1]:
            accumulated_pkg_name.append(pkg)  # we're accumulating this across iterations
            normalized_name = tuple(accumulated_pkg_name)  # extra machinations to get a hashable type (list is not)
            if normalized_name not in py_module_cache:
                modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=is_optional))

    # everything collected above goes into the payload zip
    for py_module_name in py_module_cache:
        py_module_file_name = py_module_cache[py_module_name][1]

        zf.writestr(py_module_file_name, py_module_cache[py_module_name][0])
        mu_file = to_text(py_module_file_name, errors='surrogate_or_strict')
        display.vvvvv("Including module_utils file %s" % mu_file)
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
def _get_ansible_module_fqn(module_path):
    """
    Get the fully qualified name for an ansible module based on its pathname

    remote_module_fqn is the fully qualified name. Like ansible.modules.system.ping
    Or ansible_collections.Namespace.Collection_name.plugins.modules.ping
    .. warning:: This function is for ansible modules only. It won't work for other things
        (non-module plugins, etc)

    :raises ValueError: when the path is neither a core module nor a collection
        module, or when the derived name would not be a valid python identifier
    """
    # core module first, then collection module
    match = CORE_LIBRARY_PATH_RE.search(module_path)
    if match is None:
        match = COLLECTION_PATH_RE.search(module_path)

    if match is None:
        # Currently we do not handle modules in roles so we can end up here for that reason
        raise ValueError("Unable to determine module's fully qualified name")

    path = match.group('path')
    if '.' in path:
        # FQNs must be valid as python identifiers. This sanity check has failed.
        # we could check other things as well
        raise ValueError('Module name (or path) was not a valid python identifier')

    return path.replace('/', '.')
def _add_module_to_zip(zf, remote_module_fqn, b_module_data):
"""Add a module from ansible or from an ansible collection into the module zip"""
module_path_parts = remote_module_fqn.split('.')
# Write the module
module_path = '/'.join(module_path_parts) + '.py'
zf.writestr(module_path, b_module_data)
# Write the __init__.py's necessary to get there
if module_path_parts[0] == 'ansible':
# The ansible namespace is setup as part of the module_utils setup...
start = 2
existing_paths = frozenset()
else:
# ... but ansible_collections and other toplevels are not
start = 1
existing_paths = frozenset(zf.namelist())
for idx in range(start, len(module_path_parts)):
package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
# If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
if package_path in existing_paths:
continue
# Note: We don't want to include more than one ansible module in a payload at this time
# so no need to fill the __init__.py with namespace code
zf.writestr(package_path, b'')
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
                       become_method, become_user, become_password, become_flags, environment, remote_is_local=False):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.

    :arg module_name: short name of the module being assembled
    :arg b_module_data: bytes of the module source read from disk
    :arg module_path: filesystem path the module was read from
    :arg module_args: module arguments to embed in the payload
    :arg task_vars: task variables (consulted for interpreter/shebang and config)
    :arg templar: used to template config values (eg the rlimit setting)
    :arg module_compression: name of a ``zipfile`` compression attribute, eg ``'ZIP_STORED'``
    :arg remote_is_local: True when the remote host is the controller itself
    :returns: tuple of (b_module_data, module_style, shebang); module_style is one
        of 'old', 'new', 'non_native_want_json' or 'binary'
    """
    module_substyle = module_style = 'old'
    # module_style is something important to calling code (ActionBase). It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally. It tells
    # us what we have to look to substitute in the module files and whether
    # we're using module replacer or ansiballz to format the module itself.
    if _is_binary(b_module_data):
        module_substyle = module_style = 'binary'
    elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need make sure
        # we substitute "from ansible.module_utils basic" for REPLACER
        module_style = 'new'
        module_substyle = 'python'
        b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
    elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in b_module_data:
        module_style = 'new'
        module_substyle = 'powershell'
        b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
    elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
            or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
            or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
            or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
            or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in b_module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in b_module_data:
        module_substyle = module_style = 'non_native_want_json'

    shebang = None
    # Neither old-style, non_native_want_json nor binary modules should be modified
    # except for the shebang line (Done by modify_module)
    if module_style in ('old', 'non_native_want_json', 'binary'):
        return b_module_data, module_style, shebang

    output = BytesIO()

    try:
        remote_module_fqn = _get_ansible_module_fqn(module_path)
    except ValueError:
        # Modules in roles currently are not found by the fqn heuristic so we
        # fallback to this. This means that relative imports inside a module from
        # a role may fail. Absolute imports should be used for future-proofness.
        # People should start writing collections instead of modules in roles so we
        # may never fix this
        display.debug('ANSIBALLZ: Could not determine module FQN')
        remote_module_fqn = 'ansible.modules.%s' % module_name

    if module_substyle == 'python':
        params = dict(ANSIBLE_MODULE_ARGS=module_args,)
        try:
            python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True))
        except TypeError as e:
            raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))

        try:
            compression_method = getattr(zipfile, module_compression)
        except AttributeError:
            display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
            compression_method = zipfile.ZIP_STORED

        # on-disk payload cache, keyed on module name + compression method
        lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
        cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))

        zipdata = None
        # Optimization -- don't lock if the module has already been cached
        if os.path.exists(cached_module_filename):
            display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
            with open(cached_module_filename, 'rb') as module_data:
                zipdata = module_data.read()
        else:
            if module_name in action_write_locks.action_write_locks:
                display.debug('ANSIBALLZ: Using lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[module_name]
            else:
                # If the action plugin directly invokes the module (instead of
                # going through a strategy) then we don't have a cross-process
                # Lock specifically for this module. Use the "unexpected
                # module" lock instead
                display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[None]

            display.debug('ANSIBALLZ: Acquiring lock')
            with lock:
                display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
                # Check that no other process has created this while we were
                # waiting for the lock
                if not os.path.exists(cached_module_filename):
                    display.debug('ANSIBALLZ: Creating module')
                    # Create the module zip data
                    zipoutput = BytesIO()
                    zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)

                    # walk the module imports, looking for module_utils to send- they'll be added to the zipfile
                    recursive_finder(module_name, remote_module_fqn, b_module_data, zf)

                    display.debug('ANSIBALLZ: Writing module into payload')
                    _add_module_to_zip(zf, remote_module_fqn, b_module_data)

                    zf.close()
                    zipdata = base64.b64encode(zipoutput.getvalue())

                    # Write the assembled module to a temp file (write to temp
                    # so that no one looking for the file reads a partially
                    # written file)
                    #
                    # FIXME: Once split controller/remote is merged, this can be simplified to
                    #        os.makedirs(lookup_path, exist_ok=True)
                    if not os.path.exists(lookup_path):
                        try:
                            # Note -- if we have a global function to setup, that would
                            # be a better place to run this
                            os.makedirs(lookup_path)
                        except OSError:
                            # Multiple processes tried to create the directory. If it still does not
                            # exist, raise the original exception.
                            if not os.path.exists(lookup_path):
                                raise
                    display.debug('ANSIBALLZ: Writing module')
                    with open(cached_module_filename + '-part', 'wb') as f:
                        f.write(zipdata)

                    # Rename the file into its final position in the cache so
                    # future users of this module can read it off the
                    # filesystem instead of constructing from scratch.
                    display.debug('ANSIBALLZ: Renaming module')
                    os.rename(cached_module_filename + '-part', cached_module_filename)
                    display.debug('ANSIBALLZ: Done creating module')

                if zipdata is None:
                    display.debug('ANSIBALLZ: Reading module after lock')
                    # Another process wrote the file while we were waiting for
                    # the write lock. Go ahead and read the data from disk
                    # instead of re-creating it.
                    try:
                        with open(cached_module_filename, 'rb') as f:
                            zipdata = f.read()
                    except IOError:
                        raise AnsibleError('A different worker process failed to create module file. '
                                           'Look at traceback for that process for debugging information.')
        zipdata = to_text(zipdata, errors='surrogate_or_strict')

        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar, remote_is_local=remote_is_local)
        if shebang is None:
            shebang = u'#!/usr/bin/python'

        # FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
        rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)

        if not isinstance(rlimit_nofile, int):
            rlimit_nofile = int(templar.template(rlimit_nofile))

        if rlimit_nofile:
            rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
                rlimit_nofile=rlimit_nofile,
            )
        else:
            rlimit = ''

        coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')

        if coverage_config:
            coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']

            if coverage_output:
                # Enable code coverage analysis of the module.
                # This feature is for internal testing and may change without notice.
                coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
                    coverage_config=coverage_config,
                    coverage_output=coverage_output,
                )
            else:
                # Verify coverage is available without importing it.
                # This will detect when a module would fail with coverage enabled with minimal overhead.
                coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
        else:
            coverage = ''

        now = datetime.datetime.utcnow()
        # render the AnsiballZ wrapper around the base64'd payload zip
        output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
            zipdata=zipdata,
            ansible_module=module_name,
            module_fqn=remote_module_fqn,
            params=python_repred_params,
            shebang=shebang,
            coding=ENCODING_STRING,
            year=now.year,
            month=now.month,
            day=now.day,
            hour=now.hour,
            minute=now.minute,
            second=now.second,
            coverage=coverage,
            rlimit=rlimit,
        )))
        b_module_data = output.getvalue()

    elif module_substyle == 'powershell':
        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here. If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'
        # create the common exec wrapper payload and set that as the module_data
        # bytes
        b_module_data = ps_manifest._create_powershell_wrapper(
            b_module_data, module_path, module_args, environment,
            async_timeout, become, become_method, become_user, become_password,
            become_flags, module_substyle, task_vars, remote_module_fqn
        )

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args, cls=AnsibleJSONEncoder, vault_to_text=True))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz) If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
        b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)

        facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
        b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)

    return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
                  become_method=None, become_user=None, become_password=None, become_flags=None, environment=None, remote_is_local=False):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports. This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.
    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

       ... will result in the insertion of basic.py into the module
       from the module_utils/ directory in the source tree.

    For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
    properties not available here.

    :returns: tuple of (b_module_data, module_style, shebang)
    """
    task_vars = {} if task_vars is None else task_vars
    environment = {} if environment is None else environment

    with open(module_path, 'rb') as f:

        # read in the module source
        b_module_data = f.read()

    (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
                                                                async_timeout=async_timeout, become=become, become_method=become_method,
                                                                become_user=become_user, become_password=become_password, become_flags=become_flags,
                                                                environment=environment, remote_is_local=remote_is_local)

    if module_style == 'binary':
        return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
    elif shebang is None:
        # _find_module_utils didn't pick a shebang (old-style/non_native_want_json),
        # so rewrite the module's own shebang to the discovered interpreter
        b_lines = b_module_data.split(b"\n", 1)
        if b_lines[0].startswith(b"#!"):
            b_shebang = b_lines[0].strip()

            # shlex.split on python-2.6 needs bytes. On python-3.x it needs text
            args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))

            # _get_shebang() takes text strings
            args = [to_text(a, errors='surrogate_or_strict') for a in args]
            interpreter = args[0]
            b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:], remote_is_local=remote_is_local)[0],
                                     errors='surrogate_or_strict', nonstring='passthru')

            if b_new_shebang:
                b_lines[0] = b_shebang = b_new_shebang

            # python interpreters need an explicit coding line for safety
            if os.path.basename(interpreter).startswith(u'python'):
                b_lines.insert(1, b_ENCODING_STRING)

            shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
        else:
            # No shebang, assume a binary module?
            pass

        b_module_data = b"\n".join(b_lines)

    return (b_module_data, module_style, shebang)
def get_action_args_with_defaults(action, args, defaults, templar, redirected_names=None, action_groups=None):
    """Merge a task's args on top of any applicable module_defaults.

    :arg action: resolved action/module name used to look up defaults
    :arg args: dict of args supplied directly on the task (highest precedence)
    :arg defaults: the module_defaults setting - a dict, or a list of dicts
        that is merged front-to-back (later entries win)
    :arg templar: Templar used to template the default values
    :kwarg redirected_names: deprecated - list of redirected action names; the
        last entry is used as the resolved action name
    :kwarg action_groups: mapping of resolved action name -> list of group
        names, used to apply ``group/<name>`` defaults
    :returns: dict of merged arguments; group defaults, then action-specific
        defaults, then the task's own args (which always win)
    """
    if redirected_names:
        resolved_action_name = redirected_names[-1]
    else:
        resolved_action_name = action

    if redirected_names is not None:
        msg = (
            "Finding module_defaults for the action %s. "
            "The caller passed a list of redirected action names, which is deprecated. "
            "The task's resolved action should be provided as the first argument instead."
        )
        display.deprecated(msg % resolved_action_name, version='2.16')

    # Get the list of groups that contain this action
    if action_groups is None:
        msg = (
            "Finding module_defaults for action %s. "
            "The caller has not passed the action_groups, so any "
            "that may include this action will be ignored."
        )
        # Fix: the %s placeholder was previously never substituted
        # (display.warning(msg=msg) showed the literal format string)
        display.warning(msg=msg % resolved_action_name)
        group_names = []
    else:
        group_names = action_groups.get(resolved_action_name, [])

    tmp_args = {}
    module_defaults = {}

    # Merge latest defaults into dict, since they are a list of dicts
    if isinstance(defaults, list):
        for default in defaults:
            module_defaults.update(default)

    # module_defaults keys are static, but the values may be templated
    module_defaults = templar.template(module_defaults)
    for default in module_defaults:
        if default.startswith('group/'):
            group_name = default.split('group/')[-1]
            if group_name in group_names:
                tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())

    # handle specific action defaults
    tmp_args.update(module_defaults.get(resolved_action_name, {}).copy())

    # direct args override all
    tmp_args.update(args)

    return tmp_args
|
thnee/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 66,690
|
[
"VisIt"
] |
bed7fb4c0817d7a23ca543f9e3cdb605c79667e793917fb6b993a2e7e87b96ff
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import numpy as np
from ..sile import add_sile, get_sile
from .sile import SileSiesta
from sisl._internal import set_module
from sisl._help import xml_parse
from sisl.utils import (
default_ArgumentParser, default_namespace,
collect_action, run_actions,
strmap, lstranges,
direction
)
from sisl.messages import warn, SislWarning
from sisl._array import arrayd, arrayi, emptyd, asarrayi
from sisl.atom import PeriodicTable, Atom, Atoms
from sisl.geometry import Geometry
from sisl.orbital import AtomicOrbital
from sisl.unit.siesta import unit_convert
__all__ = ['pdosSileSiesta']
Bohr2Ang = unit_convert('Bohr', 'Ang')
@set_module("sisl.io.siesta")
class pdosSileSiesta(SileSiesta):
""" Projected DOS file with orbital information
Data file containing the PDOS as calculated by Siesta.
"""
def read_geometry(self):
""" Read the geometry with coordinates and correct orbital counts """
return self.read_data()[0]
def read_fermi_level(self):
""" Returns the fermi-level """
# Get the element-tree
root = xml_parse(self.file).getroot()
# Try and find the fermi-level
Ef = root.find('fermi_energy')
if Ef is None:
warn(f"{self!s}.read_data could not locate the Fermi-level in the XML tree")
return Ef
def read_data(self, as_dataarray=False):
r""" Returns data associated with the PDOS file
For spin-polarized calculations the returned values are up/down, orbitals, energy.
For non-collinear calculations the returned values are sum/x/y/z, orbitals, energy.
Parameters
----------
as_dataarray: bool, optional
If True the returned PDOS is a `xarray.DataArray` with energy, spin
and orbital information as coordinates in the data.
The geometry, unit and Fermi level are stored as attributes in the
DataArray.
Returns
-------
geom : Geometry instance with positions, atoms and orbitals.
E : the energies at which the PDOS has been evaluated at (if Fermi-level present in file energies are shifted to :math:`E - E_F = 0`).
PDOS : an array of DOS with dimensions ``(nspin, geom.no, len(E))`` (with different spin-components) or ``(geom.no, len(E))`` (spin-symmetric).
DataArray : if `as_dataarray` is True, only this data array is returned, in this case all data can be post-processed using the `xarray` selection routines.
"""
# Get the element-tree
root = xml_parse(self.file).getroot()
# Get number of orbitals
nspin = int(root.find('nspin').text)
# Try and find the fermi-level
Ef = root.find('fermi_energy')
E = arrayd(root.find('energy_values').text.split())
if Ef is None:
warn(str(self) + '.read_data could not locate the Fermi-level in the XML tree, using E_F = 0. eV')
else:
Ef = float(Ef.text)
E -= Ef
ne = len(E)
# All coordinate, atoms and species data
xyz = []
atoms = []
atom_species = []
def ensure_size(ia):
while len(atom_species) <= ia:
atom_species.append(None)
xyz.append(None)
def ensure_size_orb(ia, i):
while len(atoms) <= ia:
atoms.append([])
while len(atoms[ia]) <= i:
atoms[ia].append(None)
if nspin == 4:
def process(D):
tmp = np.empty(D.shape[0], D.dtype)
tmp[:] = D[:, 3]
D[:, 3] = D[:, 0] - D[:, 1]
D[:, 0] = D[:, 0] + D[:, 1]
D[:, 1] = D[:, 2]
D[:, 2] = tmp[:]
return D
else:
def process(D):
return D
if as_dataarray:
import xarray as xr
if nspin == 1:
spin = ['sum']
elif nspin == 2:
spin = ['up', 'down']
elif nspin == 4:
spin = ['sum', 'x', 'y' 'z']
# Dimensions of the PDOS data-array
dims = ['E', 'spin', 'n', 'l', 'm', 'zeta', 'polarization']
shape = (ne, nspin, 1, 1, 1, 1, 1)
def to(o, DOS):
# Coordinates for this dataarray
coords = [E, spin,
[o.n], [o.l], [o.m], [o.zeta], [o.P]]
return xr.DataArray(data=process(DOS).reshape(shape),
dims=dims, coords=coords, name='PDOS')
else:
def to(o, DOS):
return process(DOS)
D = []
for orb in root.findall('orbital'):
# Short-hand function to retrieve integers for the attributes
def oi(name):
return int(orb.get(name))
# Get indices
ia = oi('atom_index') - 1
i = oi('index') - 1
species = orb.get('species')
# Create the atomic orbital
try:
Z = oi('Z')
except:
try:
Z = PeriodicTable().Z(species)
except:
# Unknown
Z = -1
try:
P = orb.get('P') == 'true'
except:
P = False
ensure_size(ia)
xyz[ia] = arrayd(orb.get('position').split())
atom_species[ia] = Z
# Construct the atomic orbital
O = AtomicOrbital(n=oi('n'), l=oi('l'), m=oi('m'), zeta=oi('z'), P=P)
# We know that the index is far too high. However,
# this ensures a consecutive orbital
ensure_size_orb(ia, i)
atoms[ia][i] = O
# it is formed like : spin-1, spin-2 (however already in eV)
DOS = arrayd(orb.find('data').text.split()).reshape(-1, nspin)
D.append(to(O, DOS))
# Now we need to parse the data
# First reduce the atom
atoms = [[o for o in a if o] for a in atoms]
atoms = Atoms(map(Atom, atom_species, atoms))
geom = Geometry(arrayd(xyz) * Bohr2Ang, atoms)
if as_dataarray:
# Create a new dimension without coordinates (orbital index)
D = xr.concat(D, 'orbital')
# Add attributes
D.attrs['geometry'] = geom
D.attrs['unit'] = '1/eV'
if Ef is None:
D.attrs['Ef'] = 'Unknown'
else:
D.attrs['Ef'] = Ef
return D
D = np.moveaxis(np.stack(D, axis=0), 2, 0)
if nspin == 1:
return geom, E, D[0]
return geom, E, D
@default_ArgumentParser(description="""
Extract/Plot data from a PDOS/PDOS.xml file
The arguments are parsed as they are passed to the command line; hence order is important.
Consider the following:
--spin x --atom all --spin y --atom all --plot
This will plot the spin x and y components for all atoms, with no normalization.
--norm orbital --atom all --spin x --atom 1 --plot
This will normalize the PDOS to the number of projected orbitals in each --atom argument.
The --atom all plots the total DOS (no spin direction), then the 2nd plotted line has only
the x-component of the first atom. Since the normalization in both cases are "orbital" one
can directly compare the values.
One can collect as many curves as necessary, for every --plot/--out argument the data will
be plotted/saved and all prior options will be reset. Hence
--spin x --atom all --out spin_x_all.dat --spin y --atom all --out spin_y_all.dat
will store the spin x/y components of all atoms in spin_x_all.dat/spin_y_all.dat, respectively.
""")
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
# We limit the import to occur here
import argparse
import warnings
comment = 'Fermi-level shifted to 0'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
geometry, E, PDOS = self.read_data()
if len(w) > 0:
if issubclass(w[-1].category, SislWarning):
comment = 'Fermi-level unknown'
def norm(geom, orbitals=None, norm='none'):
r""" Normalization factor depending on the input
The normalization can be performed in one of the below methods.
In the following :math:`N` refers to the normalization constant
that is to be used (i.e. the divisor):
``'none'``
:math:`N=1`
``'all'``
:math:`N` equals the number of orbitals in the total geometry
``'atom'``
:math:`N` equals the total number of orbitals in the selected
atoms. If `orbitals` is an argument a conversion of `orbitals` to the equivalent
unique atoms is performed, and subsequently the total number of orbitals on the
atoms is used. This makes it possible to compare the fraction of orbital DOS easier.
``'orbital'``
:math:`N` is the sum of selected orbitals, if `atoms` is specified, this
is equivalent to the 'atom' option.
Parameters
----------
orbitals : array_like of int or bool, optional
only return for a given set of orbitals (default to all)
norm : {'none', 'atom', 'orbital', 'all'}
how the normalization of the summed DOS is performed (see `norm` routine)
"""
# Cast to lower
norm = norm.lower()
if norm == 'none':
NORM = 1
elif norm in ['all', 'atom', 'orbital']:
NORM = geom.no
else:
raise ValueError(f"norm error on norm keyword in when requesting normalization!")
# If the user requests all orbitals
if orbitals is None:
return NORM
# Now figure out what to do
# Get pivoting indices to average over
if norm == 'orbital':
NORM = len(orbitals)
elif norm == 'atom':
a = np.unique(geom.o2a(orbitals))
# Now sum the orbitals per atom
NORM = geom.orbitals[a].sum()
return NORM
def _sum_filter(PDOS):
""" Default sum is the total DOS, no projection on directions """
if PDOS.ndim == 2:
# non-polarized
return PDOS
elif PDOS.shape[0] == 2:
# polarized
return PDOS.sum(0)
return PDOS[0]
namespace = default_namespace(_geometry=geometry,
_E=E,
_PDOS=PDOS,
# The energy range of all data
_Erng=None,
_norm="none",
_PDOS_filter_name='total',
_PDOS_filter=_sum_filter,
_data=[],
_data_description=[],
_data_header=[])
def ensure_E(func):
    """ Decorator ensuring the energy grid is the first element of ``ns._data``

    Wraps an argparse ``Action.__call__``; before delegating it appends the
    (possibly sub-ranged) energy column and its header exactly once.
    """
    from functools import wraps

    # preserve the wrapped callable's metadata (the original lost __name__/__doc__)
    @wraps(func)
    def assign_E(self, *args, **kwargs):
        # argparse actions are called as (self, parser, ns, value, option_string)
        ns = args[1]
        if len(ns._data) == 0:
            # We immediately extract the energies
            ns._data.append(ns._E[ns._Erng].flatten())
            ns._data_header.append('Energy[eV]')
        return func(self, *args, **kwargs)
    return assign_E
class ERange(argparse.Action):
    """ argparse action parsing an energy-range string ("-1:0,1:2") into grid indices

    Stores the sorted, unique indices in ``ns._Erng`` (or ``None`` for the full range).
    """

    def __call__(self, parser, ns, value, option_string=None):
        E = ns._E
        # strmap converts "a:b,c:d" into a list of (begin, end) float pairs
        Emap = strmap(float, value, E.min(), E.max())

        def Eindex(e):
            # index of the grid-point closest to energy `e`
            return np.abs(E - e).argmin()

        # Convert the (begin, end) pairs into index ranges.
        # Accumulate into a separate list: the original rebound `E` to the
        # accumulator, so the open-ended branch used len(accumulator) instead
        # of the grid length and truncated the range.
        idx = []
        for begin, end in Emap:
            if begin is None and end is None:
                # a fully open range means "use everything"
                ns._Erng = None
                return
            elif begin is None:
                idx.append(range(Eindex(end)+1))
            elif end is None:
                # open-ended towards the maximum: include up to the last grid-point
                idx.append(range(Eindex(begin), len(E)))
            else:
                idx.append(range(Eindex(begin), Eindex(end)+1))
        # Issuing unique also sorts the entries
        ns._Erng = np.unique(arrayi(idx).flatten())
p.add_argument('--energy', '-E', action=ERange,
help="""Denote the sub-section of energies that are extracted: "-1:0,1:2" [eV]
This flag takes effect on all energy-resolved quantities and is reset whenever --plot or --out is called""")
# The normalization method
class NormAction(argparse.Action):
    """ argparse action storing the requested normalization method in ``ns._norm`` """

    @collect_action
    def __call__(self, parser, ns, value, option_string=None):
        # only remember the choice; it takes effect in subsequent --atom actions
        ns._norm = value
p.add_argument('--norm', '-N', action=NormAction, default='atom',
choices=['none', 'atom', 'orbital', 'all'],
help="""Specify the normalization method; "none") no normalization, "atom") total orbitals in selected atoms,
"orbital") selected orbitals or "all") all orbitals.
Will only take effect on subsequent --atom ranges.
This flag is reset whenever --plot or --out is called""")
if PDOS.ndim == 2:
# no spin is possible
pass
elif PDOS.shape[0] == 2:
# Add a spin-action
class Spin(argparse.Action):
    """ argparse action selecting which channel of a spin-polarized PDOS to store

    Builds a filter closure mapping the (2, no, NE) PDOS array to a single
    (no, NE) component and stores it (plus its label) on the namespace.
    """

    @collect_action
    def __call__(self, parser, ns, value, option_string=None):
        # nargs=1 wraps the argument in a list
        value = value[0].lower()
        if value in ("up", "u"):
            name = "up"
            def _filter(PDOS):
                return PDOS[0]
        elif value in ("down", "dn", "dw", "d"):
            name = "down"
            def _filter(PDOS):
                return PDOS[1]
        elif value in ("sum", "+", "total"):
            name = "total"
            def _filter(PDOS):
                return PDOS.sum(0)
        else:
            raise ValueError(f"Wrong argument for --spin [up, down, sum], found {value}")
        # remember the selection for later header/description generation
        ns._PDOS_filter_name = name
        ns._PDOS_filter = _filter
p.add_argument('--spin', '-S', action=Spin, nargs=1,
help="Which spin-component to store, up/u, down/d or sum/+/total")
elif PDOS.shape[0] == 4:
# Add a spin-action
class Spin(argparse.Action):
    """ argparse action selecting spin components of a non-colinear PDOS (shape (4, no, NE))

    Accepts "sum"/"+"/"total" for the charge component, or a string of
    direction letters (e.g. "xy") that are summed.
    """

    @collect_action
    def __call__(self, parser, ns, value, option_string=None):
        # nargs=1 wraps the argument in a list
        value = value[0].lower()
        if value in ("sum", "+", "total"):
            name = "total"
            def _filter(PDOS):
                # component 0 holds the total (charge) PDOS
                return PDOS[0]
        else:
            # the stuff must be a range of directions
            # so simply put it in (direction() maps 'x'/'y'/'z' to indices;
            # NOTE(review): presumably 0-based offsets into the x/y/z block -- confirm)
            idx = list(map(direction, value))
            name = value
            def _filter(PDOS):
                return PDOS[idx].sum(0)
        ns._PDOS_filter_name = name
        ns._PDOS_filter = _filter
p.add_argument('--spin', '-S', action=Spin, nargs=1,
help="Which spin-component to store, sum/+/total, x, y, z or a sum of either of the directions xy, zx etc.")
def parse_atom_range(geom, value):
    """ Parse an atomic/orbital range specification ("1-2[3,4]") into orbital indices

    Parameters
    ----------
    geom : Geometry
       geometry the (1-based) atomic/orbital indices refer to
    value : str
       user specification; "all" or ":" selects every orbital

    Returns
    -------
    tuple of (ndarray of orbital indices, cleaned specification string)
    """
    if value.lower() in ("all", ":"):
        return np.arange(geom.no), "all"

    value = ",".join(# ensure only single commas (no space between them)
        "".join(# ensure no empty whitespaces
            ",".join(# join different lines with a comma
                value.splitlines())
            .split())
        .split(","))

    # Sadly many shell interpreters does not
    # allow simple [] because they are expansion tokens
    # in the shell.
    # We bypass this by allowing *, [, {
    # * will "only" fail if files are named accordingly, else
    # it will be passed as-is.
    #       {    [    *
    sep = ['c', 'b', '*']
    failed = True
    while failed and len(sep) > 0:
        try:
            ranges = lstranges(strmap(int, value, 0, len(geom), sep.pop()))
            failed = False
        except Exception:
            # try the next delimiter flavour; the original bare `except:`
            # would also have swallowed KeyboardInterrupt/SystemExit
            pass
    if failed:
        print(value)
        raise ValueError("Could not parse the atomic/orbital ranges")

    # we have only a subset of the orbitals
    orbs = []
    no = 0
    for atoms in ranges:
        if isinstance(atoms, list):
            # [atom, [orbitals]] specification; user input is 1-based
            ob = geom.a2o(atoms[0] - 1, True)
            # We normalize for the total number of orbitals
            # on the requested atoms.
            # In this way the user can compare directly the DOS
            # for same atoms with different sets of orbitals and the
            # total will add up.
            no += len(ob)
            ob = ob[asarrayi(atoms[1]) - 1]
        else:
            ob = geom.a2o(atoms - 1, True)
            no += len(ob)
        orbs.append(ob)

    if len(orbs) == 0:
        print('Available atoms:')
        print(f' 1-{len(geometry)}')
        print('Input atoms:')
        print(' ', value)
        raise ValueError('Atomic/Orbital requests are not fully included in the device region.')

    # Add one to make the c-index equivalent to the f-index
    return np.concatenate(orbs).flatten(), value
# Try and add the atomic specification
class AtomRange(argparse.Action):
    """ argparse action appending the (normalized) PDOS summed over an atom/orbital range """

    @collect_action
    @ensure_E
    def __call__(self, parser, ns, value, option_string=None):
        # get which orbitals to extract
        orbs, value = parse_atom_range(ns._geometry, value)
        # calculate the normalization
        scale = norm(ns._geometry, orbs, ns._norm)
        # Calculate PDOS on the selected atoms with the norm
        ns._data.append(ns._PDOS_filter(ns._PDOS)[orbs].sum(0) / scale)
        index = len(ns._data)
        # a full selection is labelled DOS, anything else PDOS
        if value == "all":
            DOS = "DOS"
        else:
            DOS = "PDOS"
        # header/description carry the spin selection when one is active
        if ns._PDOS_filter_name is not None:
            ns._data_header.append(f"{DOS}[spin={ns._PDOS_filter_name}:{value}][1/eV]")
            ns._data_description.append(f"Column {index} is the sum of spin={ns._PDOS_filter_name} on atoms[orbs] {value} with normalization 1/{scale}")
        else:
            ns._data_header.append(f"{DOS}[{value}][1/eV]")
            ns._data_description.append(f"Column {index} is the total PDOS on atoms[orbs] {value} with normalization 1/{scale}")
p.add_argument('--atom', '-a', type=str, action=AtomRange,
help="""Limit orbital resolved PDOS to a sub-set of atoms/orbitals: "1-2[3,4]" will yield the 1st and 2nd atom and their 3rd and fourth orbital. Multiple comma-separated specifications are allowed. Note that some shells does not allow [] as text-input (due to expansion), {, [ or * are allowed orbital delimiters.
Multiple options will create a new column/line in output, the --norm and --E should be before any of these arguments""")
class Out(argparse.Action):
    """ argparse action writing the collected columns (or the geometry) to file, then resetting state """

    @run_actions
    def __call__(self, parser, ns, value, option_string=None):
        out = value[0]
        try:
            # We figure out if the user wants to write
            # to a geometry
            obj = get_sile(out, mode='w')
            if hasattr(obj, 'write_geometry'):
                with obj as fh:
                    fh.write_geometry(ns._geometry)
                return
            raise NotImplementedError
        except:
            # NOTE(review): bare except is a deliberate fall-through to table
            # output when `out` is not a geometry sile
            pass

        # nothing collected yet: default to total DOS over all orbitals
        if len(ns._data) == 0:
            ns._data.append(ns._E)
            ns._data_header.append('Energy[eV]')
            ns._data.append(ns._PDOS_filter(ns._PDOS).sum(0))
            if ns._PDOS_filter_name is not None:
                ns._data_header.append(f"DOS[spin={ns._PDOS_filter_name}][1/eV]")
            else:
                ns._data_header.append("DOS[1/eV]")

        from sisl.io import tableSile
        # `comment` is the Fermi-level note captured by the enclosing scope
        tableSile(out, mode='w').write(*ns._data,
                                       comment=[comment] + ns._data_description,
                                       header=ns._data_header)
        # Clean all data so subsequent collections start fresh
        ns._norm = "none"
        ns._data = []
        ns._data_header = []
        ns._data_description = []
        ns._PDOS_filter_name = None
        ns._PDOS_filter = _sum_filter
        ns._Erng = None
p.add_argument('--out', '-o', nargs=1, action=Out,
help='Store currently collected PDOS (at its current invocation) to the out file.')
class Plot(argparse.Action):
    """ argparse action plotting the collected columns (to screen or file), then resetting state """

    @run_actions
    def __call__(self, parser, ns, value, option_string=None):
        # nothing collected yet: default to total DOS over all orbitals
        if len(ns._data) == 0:
            ns._data.append(ns._E)
            ns._data_header.append('Energy[eV]')
            ns._data.append(ns._PDOS_filter(ns._PDOS).sum(0))
            if ns._PDOS_filter_name is not None:
                ns._data_header.append(f"DOS[spin={ns._PDOS_filter_name}][1/eV]")
            else:
                ns._data_header.append("DOS[1/eV]")

        from matplotlib import pyplot as plt
        plt.figure()

        def _get_header(header):
            # reduce a column header to a short legend label
            header = (header
                      .replace("PDOS", "")
                      .replace("DOS", "")
                      .replace("[1/eV]", "")
            )
            if len(header) == 0:
                return "Total"
            # strip the surrounding brackets
            if header.startswith("["):
                header = header[1:]
            if header.endswith("]"):
                header = header[:-1]
            return header

        kwargs = {}
        # with many curves, make them translucent for readability
        if len(ns._data) > 2:
            kwargs['alpha'] = 0.6
        # column 0 is the energy axis; the rest are DOS columns
        for i in range(1, len(ns._data)):
            plt.plot(ns._data[0], ns._data[i], label=_get_header(ns._data_header[i]), **kwargs)

        plt.ylabel('DOS [1/eV]')
        # `comment` (from the enclosing scope) flags an unknown Fermi level
        if 'unknown' in comment:
            plt.xlabel('E [eV]')
        else:
            plt.xlabel('E - E_F [eV]')
        plt.legend(loc=8, ncol=3, bbox_to_anchor=(0.5, 1.0))
        if value is None:
            plt.show()
        else:
            plt.savefig(value)
        # Clean all data so subsequent collections start fresh
        ns._norm = "none"
        ns._data = []
        ns._data_header = []
        ns._data_description = []
        ns._PDOS_filter_name = None
        ns._PDOS_filter = _sum_filter
        ns._Erng = None
p.add_argument('--plot', '-p', action=Plot, nargs='?', metavar='FILE',
help='Plot the currently collected information (at its current invocation).')
return p, namespace
# PDOS files are:
# They contain the same file (same xml-data)
# However, pdos.xml is preferred because it has higher precision.
# siesta.PDOS
add_sile('PDOS', pdosSileSiesta, gzip=True)
# pdos.xml/siesta.PDOS.xml
add_sile('PDOS.xml', pdosSileSiesta, gzip=True)
|
zerothi/sisl
|
sisl/io/siesta/pdos.py
|
Python
|
mpl-2.0
| 25,117
|
[
"SIESTA"
] |
8b5f329ea5a82196c4abb474b86e8e956c0319558c0547268b0892ae45e16321
|
from queue import PriorityQueue
import itertools
def show(customer, time, activity):
    """Print a single simulation event, formatted as ``[  t] customer: activity``."""
    event = '[%3d] %s: %s' % (time, customer, activity)
    print(event)
def visit(customer, arrival):
    """Coroutine modelling one customer's branch visit.

    Prints the arrival immediately; each subsequent ``send(time)`` advances
    the customer through the remaining stages.
    """
    show(customer, arrival, 'arrives at branch')
    for stage in ('waiting in line', 'doing transactions', 'leaves branch'):
        time = yield
        show(customer, time, stage)
def enqueue(line, visit, time):
    """Advance a primed visit coroutine to its queued stage, then place it in line."""
    # send before put: the customer starts waiting the moment they join the line
    visit.send(time)
    line.put(visit)
def serve(line, clock):
    """Drain the queue, resuming each waiting visit with the next clock tick."""
    while not line.empty():
        line.get().send(next(clock))
def main(clock):
    """Simulate two customers (A and B) visiting the branch, driven by `clock` ticks."""
    # FIFO queue of waiting visits.  The original referenced the un-imported
    # name ``Queue`` (only PriorityQueue was imported) and raised NameError;
    # a plain FIFO Queue is the correct structure here since generators are
    # not orderable.
    from queue import Queue
    visit_a = visit('A', next(clock))
    visit_b = visit('B', next(clock))
    # prime both coroutines up to their first yield (prints the arrivals)
    next(visit_a)
    next(visit_b)
    line = Queue()
    enqueue(line, visit_a, next(clock))
    enqueue(line, visit_b, next(clock))
    serve(line, clock)
if __name__ == '__main__':
main(itertools.count())
|
garoa/concorrente.py
|
coroutines/waitsim.py
|
Python
|
cc0-1.0
| 915
|
[
"VisIt"
] |
fbfbc41c37f942202150da4c94b9a4ef58e383335f66081facbaee1421945495
|
"""
Derived from Brian Naughton @ http://blog.booleanbiotech.com/genetic_engineering_pipeline_python.html
"""
from __future__ import print_function
import datetime
import math
import re
import autoprotocol
from autoprotocol import Unit
from autoprotocol.unit import UnitValueError
from autoprotocol.container import Container
from autoprotocol.container_type import _CONTAINER_TYPES, ContainerType
from autoprotocol.protocol import Protocol, WellGroup, Well
from autoprotocol.protocol import Ref # "Link a ref name (string) to a Container instance."
import requests
import logging
import json
import sys
import numpy
import os
import requests
from lib import round_up
from requests.packages.urllib3.exceptions import InsecureRequestWarning, InsecurePlatformWarning, SNIMissingWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
#http debugging
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
#change this to 2 to show raw http request/responses
http_client.HTTPConnection.debuglevel = 0
experiment_name = ''
# Transcriptic authorization
CONFIG_INITIALIZED = False
TSC_HEADERS = None
ORG_NAME = None
def initialize_config():
    """Lazily load Transcriptic credentials from auth.json into module globals.

    Idempotent: only the first call reads the file. Populates TSC_HEADERS
    (auth headers), ORG_NAME, and sets CONFIG_INITIALIZED.
    """
    global TSC_HEADERS, CONFIG_INITIALIZED, ORG_NAME
    if CONFIG_INITIALIZED: return
    # --test on the command line switches to the test-mode credential file
    if "--test" in sys.argv:
        auth_file = '../test_mode_auth.json'
    else:
        auth_file = '../auth.json'
    # paths are relative to this module's directory, not the CWD
    auth_file_path = os.path.join(os.path.dirname(__file__), auth_file)
    auth_config = json.load(open(auth_file_path))
    # keep only the authentication headers Transcriptic expects
    TSC_HEADERS = {k:v for k,v in auth_config.items() if k in ["X_User_Email","X_User_Token"]}
    ORG_NAME = auth_config['org_name']
    CONFIG_INITIALIZED = True
# Correction to Transcriptic-specific dead volumes
_CONTAINER_TYPES['96-deep-kf'] = _CONTAINER_TYPES['96-deep-kf']._replace(cover_types = ["standard"])
_CONTAINER_TYPES['6-flat-tc'] = ContainerType(name="6-well tissue cell culture plate",
well_count=6,
well_depth_mm=None,
well_volume_ul=Unit(5000.0, "microliter"),
well_coating=None,
sterile=False,
cover_types=["standard", "universal"],
seal_types=None,
capabilities=["cover", "incubate", "image_plate"],
shortname="6-flat-tc",
is_tube=False,
col_count=3,
dead_volume_ul=Unit(400, "microliter"),
safe_min_volume_ul=Unit(600, "microliter"))
_CONTAINER_TYPES['96-flat-tc'] = ContainerType(name="96-well tissue cell culture flat-bottom plate",
well_count=96,
well_depth_mm=None,
well_volume_ul=Unit(340.0, "microliter"),
well_coating=None,
sterile=False,
is_tube=False,
cover_types=["standard", "universal", "low_evaporation"],
seal_types=None,
capabilities=["pipette", "spin", "absorbance",
"fluorescence", "luminescence",
"incubate", "gel_separate",
"gel_purify", "cover", "stamp",
"dispense"],
shortname="96-flat",
col_count=12,
dead_volume_ul=Unit(25, "microliter"),
safe_min_volume_ul=Unit(65, "microliter"))
_CONTAINER_TYPES['screw-cap-1.8'] = ContainerType(name="2mL Microcentrifuge tube",
well_count=1,
well_depth_mm=None,
well_volume_ul=Unit(1800.0, "microliter"),
well_coating=None,
sterile=False,
cover_types=None,
seal_types=None,
capabilities=["pipette", "gel_separate",
"gel_purify", "incubate", "spin"],
shortname="micro-2.0",
is_tube=True,
col_count=1,
dead_volume_ul=Unit(15, "microliter"),
safe_min_volume_ul=Unit(40, "microliter")
)
def set_property(wellorcontainer, property_name, value):
    """Set ``property_name`` to ``value`` (stringified) on every well of the input."""
    stringified = value if isinstance(value, str) else str(value)
    for well in convert_to_wellgroup(wellorcontainer):
        assert isinstance(well, Well)
        well.properties[property_name] = stringified
def copy_cell_line_name(from_wellorcontainer, to_wellorcontainer):
    """Copy the 'cell_line_name' property from one container/wells to another."""
    name = get_cell_line_name(from_wellorcontainer)
    set_property(to_wellorcontainer, 'cell_line_name', name)

def get_cell_line_name(wellorcontainer):
    """Return the 'cell_line_name' property of the input's first well."""
    wells = convert_to_wellgroup(wellorcontainer)
    first_well = wells[0]
    return first_well.properties['cell_line_name']
def init_inventory_container(container,headers=None, org_name=None):
    """Initialize a container (cover state + all well volumes/properties) from Transcriptic.

    Fetches the container JSON once and reuses it for every well to avoid
    one HTTP request per well.
    """
    initialize_config()
    headers = headers if headers else TSC_HEADERS
    org_name = org_name if org_name else ORG_NAME
    def _container_url(container_id):
        return 'https://secure.transcriptic.com/{}/samples/{}.json'.format(org_name, container_id)
    response = requests.get(_container_url(container.id), headers=headers, verify=False)
    response.raise_for_status()
    container_json = response.json()
    container.cover = container_json['cover']
    # hydrate every well from the single fetched payload
    for well in container.all_wells():
        init_inventory_well(well,container_json=container_json)
#@TODO: this needs to be mocked in tests since it hits the transcriptic api
def init_inventory_well(well, headers=None, org_name=None,container_json=None):
    """Initialize well (set volume etc) for Transcriptic

    Populates ``well.name``, ``well.properties`` and ``well.volume`` from the
    Transcriptic inventory API (or from a pre-fetched ``container_json``).
    Raises ValueError if the well carries an 'ERROR' property.
    """
    initialize_config()
    headers = headers if headers else TSC_HEADERS
    org_name = org_name if org_name else ORG_NAME
    def _container_url(container_id):
        return 'https://secure.transcriptic.com/{}/samples/{}.json'.format(org_name, container_id)
    #only initialize containers that have already been made
    if not well.container.id:
        well.volume = ul(0)
        return
    # re-use the caller-supplied payload when available (avoids one GET per well)
    if container_json:
        container = container_json
    else:
        response = requests.get(_container_url(well.container.id), headers=headers)
        response.raise_for_status()
        container = response.json()
    well_data = list(filter(lambda w: w['well_idx'] == well.index,container['aliquots']))
    #correct the cover status on the container
    #they don't return info on empty wells
    if not well_data:
        well.volume = ul(0)
        return
    well_data = well_data[0]
    # fall back to the container label when the aliquot is unnamed
    well.name = "{}".format(well_data['name']) if well_data['name'] is not None else container["label"]
    well.properties = well_data['properties']
    if well_data.get('resource'):
        well.properties['Resource'] = well_data['resource']['name']
    well.volume = Unit(well_data['volume_ul'], 'microliter')
    if 'ERROR' in well.properties:
        raise ValueError("Well {} has ERROR property: {}".format(well, well.properties["ERROR"]))
    #if well.volume < Unit(20, "microliter"):
    #    logging.warn("Low volume for well {} : {}".format(well.name, well.volume))
    return True
def put_well_data(container_id, well_index, data_obj, headers=None, org_name=None,container_json=None):
    """Update a well with new data

    PUTs ``data_obj`` as JSON to the Transcriptic inventory endpoint for the
    given container/well; raises on HTTP error.
    """
    initialize_config()
    headers = headers if headers else TSC_HEADERS
    org_name = org_name if org_name else ORG_NAME
    def _well_url(container_id, well_index):
        return 'https://secure.transcriptic.com/{}/inventory/samples/{}/{}'.format(org_name, container_id, well_index)
    # NOTE(review): mutates the shared headers dict in place -- confirm intended
    headers['content-type'] = 'application/json'
    response = requests.put(_well_url(container_id, well_index), headers=headers,
                            data=json.dumps(data_obj),
                            verify=False
                            )
    response.raise_for_status()
def set_well_name(well_or_wells, name):
    """Assign `name` to every well in the input well/list/WellGroup/container."""
    for well in convert_to_wellgroup(well_or_wells):
        well.name = name
def uniquify(s):
    """ Converts a string into a unique string by including the timestamp"""
    stamp = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
    return '%s_%s' % (s, stamp)
def total_plate_available_volume(plate, first_well_index=0):
    """Head-space (max capacity minus current volume) across the plate, in microliters."""
    # optionally skip the leading wells
    if not first_well_index:
        wells = plate.all_wells()
    else:
        wells = plate.all_wells()[first_well_index:]
    # NOTE(review): the subtrahend is the volume of the WHOLE plate even when
    # first_well_index skips wells -- looks inconsistent, confirm intent
    return (sum([get_well_max_volume(well) for well in wells]) -\
        total_plate_volume(plate)).to('microliter')

def total_plate_volume(plate,aspiratable=False):
    """ Deprecated: use get_volume"""
    assert isinstance(plate, Container)
    return get_volume(plate,aspiratable)
def floor_volume(volume):
    """Return the volume floored to a whole number of microliters."""
    whole_ul = math.floor(volume.to('microliter').magnitude)
    return ul(whole_ul)
def get_volume(entity, aspiratable=False):
    """Total volume over a well/WellGroup/container/list, in microliters.

    With aspiratable=True each well's dead volume is subtracted (clamped at zero).
    """
    wells = convert_to_wellgroup(entity)
    if aspiratable:
        per_well = [max(w.volume - get_well_dead_volume(w), ul(0)) for w in wells]
    else:
        per_well = [w.volume for w in wells]
    return sum(per_well).to('microliter')
def assert_non_negative_well(well):
    """Raise if a well's bookkeeping volume has gone negative."""
    if well.volume < ul(0):
        raise Exception('Well volume can\'t be negative for well %s'%well)
def get_well_dead_volume(wellorcontainer):
    """Dead (unaspiratable) volume of the well's container type, in microliters."""
    well = wellorcontainer.well(0) if isinstance(wellorcontainer, Container) else wellorcontainer
    assert_non_negative_well(well)
    return well.container.container_type.dead_volume_ul.to('microliter')
def get_well_safe_volume(wellorcontainer):
    """Safe minimum working volume of the well's container type, in microliters."""
    well = wellorcontainer.well(0) if isinstance(wellorcontainer, Container) else wellorcontainer
    assert_non_negative_well(well)
    return well.container.container_type.safe_min_volume_ul.to('microliter')
def get_well_max_volume(wellorcontainer, mammalian_cell_mode=False):
    """
    Get the max volume of a set of wells. If mammalian_cell_mode=False, we don't allow more than 100uL in
    6-flat plates to prevent adding too much volume

    Fix: the original accepted ``mammalian_cell_mode`` but never consulted it,
    so the 100uL cap always applied; the flag now bypasses the cap as documented.
    """
    if isinstance(wellorcontainer,Container):
        well = wellorcontainer.well(0)
    else:
        well = wellorcontainer
    assert_non_negative_well(well)
    # cap 6-flat plates unless explicitly in mammalian-cell mode
    if well.container.container_type.shortname == '6-flat' and not mammalian_cell_mode:
        return ul(100)
    return well.container.container_type.well_volume_ul.to('microliter')
def space_available(well, first_well_index=0):
    """
    Volume remaining in the well

    Accepts a Container (sums head-space over its wells, optionally skipping
    the first ``first_well_index`` wells) or a single Well.
    """
    if isinstance(well, Container):
        return (total_plate_available_volume(well, first_well_index)).to('microliter')
    return (get_well_max_volume(well) - well.volume).to('microliter')
def touchdown_pcr(fromC, toC, durations, stepsize=2, meltC=98, extC=72):
    """Touchdown PCR protocol generator.

    Produces one single-cycle group per annealing temperature, stepping down
    from `fromC` towards (but excluding) `toC` by `stepsize`.  `durations`
    holds the melt/anneal/extension durations in seconds.
    """
    assert 0 < stepsize < toC < fromC

    def td(temp, dur):
        # one thermocycle step dict
        return {"temperature": "{:2g}:celsius".format(temp),
                "duration": "{:d}:second".format(dur)}

    melt_s, anneal_s, ext_s = durations[0], durations[1], durations[2]
    groups = []
    for C in numpy.arange(fromC, toC, -stepsize):
        steps = [td(meltC, melt_s), td(C, anneal_s), td(extC, ext_s)]
        groups.append({"cycles": 1, "steps": steps})
    return groups
def convert_ug_to_pmol(ug_dsDNA, num_nts):
    """Convert ug dsDNA to pmol, given its length in nucleotides (660 g/mol per bp)."""
    AVG_BP_MW = 660.0
    return (float(ug_dsDNA) / num_nts) * (1e6 / AVG_BP_MW)
def expid(val, expt_name=None):
    """Generate a unique ID per experiment

    Falls back to the module-level ``experiment_name`` when no explicit
    experiment name is provided.
    """
    global experiment_name
    if not expt_name:
        assert experiment_name, "Must set experiment name"
        expt_name = experiment_name
    return f"{expt_name}_{val}"
def ul(microliters):
    """Unicode function name for creating microliter volumes"""
    # accept pre-formed unit strings like "2:milliliter" and convert
    if isinstance(microliters,str) and ':' in microliters:
        return Unit(microliters).to('microliter')
    return Unit(microliters,"microliter")

def hours(hours):
    """Create an hour duration; unit strings with ':' are converted."""
    if isinstance(hours,str) and ':' in hours:
        return Unit(hours).to('hour')
    return Unit(hours,"hour")

def minutes(minutes):
    """Create a minute duration; unit strings with ':' are converted."""
    if isinstance(minutes,str) and ':' in minutes:
        return Unit(minutes).to('minute')
    return Unit(minutes,"minute")

def ug(micrograms):
    """Unicode function name for creating microgram masses"""
    return Unit(micrograms,"microgram")

def ng(nanograms):
    """Unicode function name for creating nanogram masses"""
    return Unit(nanograms,"nanogram")

def ml(milliliters):
    """Unicode function name for creating microliter volumes"""
    # expressed in microliters so all volumes share one unit
    return ul(milliliters*1000)

def pmol(picomoles):
    """Unicode function name for creating picomoles"""
    return Unit(picomoles,"picomole")

def uM(micromolar):
    """Create a micromolar concentration."""
    return Unit(micromolar,"micromolar")

def mM(millimolar):
    """Create a millimolar concentration."""
    return Unit(millimolar,'millimolar')
def ensure_list(potential_item_or_items):
    """Wrap a non-iterable in a list; convert any iterable to a list.

    Note: strings are iterable and therefore split into characters.
    """
    try:
        iter(potential_item_or_items)
    except TypeError:
        # scalar: wrap it
        return [potential_item_or_items]
    return list(potential_item_or_items)
def set_name(wellsorcontainer, new_name):
    """Rename a container directly, or every well of a well/list/WellGroup."""
    if isinstance(wellsorcontainer, Container):
        wellsorcontainer.name = new_name
        return
    for well in convert_to_wellgroup(wellsorcontainer):
        well.name = new_name
def copy_well_names(source_wells_or_container, dest_wells_or_container, pre_fix='',
                    post_fix=''):
    """
    Copy the name from a source container or list of wells to another container or list of wells.
    If the wells don't have names, their human readable well name will be used
    """
    source_wells = convert_to_wellgroup(source_wells_or_container)
    dest_wells = convert_to_wellgroup(dest_wells_or_container)
    #distribute: one source name fans out to every destination
    if len(source_wells)==1 and len(dest_wells)>1:
        source_wells = list(source_wells)*len(dest_wells)
    #consolidate: many sources map onto the single destination (last write wins)
    elif len(dest_wells)==1 and len(source_wells)>1:
        dest_wells = list(dest_wells)*len(source_wells)
    else:
        assert len(source_wells)==len(dest_wells), 'source and dest wells must be the same cardinality'
    for source_well, dest_well in zip(source_wells,dest_wells):
        # fall back to the human-readable position (e.g. "A1") when unnamed
        source_well_name = source_well.name if source_well.name else source_well.humanize()
        dest_well.name = "%s%s%s"%(pre_fix, source_well_name, post_fix)
def convert_to_wellgroup(entity):
    """Normalize a Container/list/WellGroup/Well into a WellGroup of its wells.

    Raises for any other input type.
    """
    if isinstance(entity,Container):
        wells = entity.all_wells()
    elif isinstance(entity, list):
        wells = WellGroup([])
        #speed this function up for a common case
        if all([isinstance(item,Well) for item in entity]):
            return WellGroup(entity)
        #slower mixed entity case: recurse into each element
        for item in entity:
            wells += convert_to_wellgroup(item)
    elif isinstance(entity,WellGroup):
        #clone the entity to allow us to edit in in functions
        wells = WellGroup(list(entity))
    elif isinstance(entity,Well):
        wells = WellGroup([entity])
    else:
        raise Exception("unknown entity type %s"%entity)
    return wells
def assert_valid_volume(wells, exception_info='invalid volume'):
    """For wells that we have aspirated volume from, make sure that we haven't requested more volume than could be aspirated
    """
    checked = ensure_list(wells)
    assert all(w.volume >= get_well_dead_volume(w) for w in checked), exception_info
    assert all(w.volume <= get_well_max_volume(w) for w in checked), exception_info
def get_column_wells(container, column_index_or_indexes):
    """Return the WellGroup making up one column (or several columns) of a plate.

    Raises ValueError for a column index beyond the container's column count.
    """
    assert isinstance(container, Container)
    # a list of indexes recurses per column and concatenates the results
    if isinstance(column_index_or_indexes,list):
        result = []
        for column_index in column_index_or_indexes:
            result+=get_column_wells(container, column_index)
        return WellGroup(result)
    column_index = column_index_or_indexes
    num_cols = container.container_type.col_count
    num_rows = container.container_type.row_count()
    if column_index >= num_cols:
        raise ValueError('column index %s is too high, only %s cols in this container'%(column_index,num_cols))
    # columnwise enumeration lists wells column by column, so one column
    # occupies a contiguous slice of length num_rows
    start = num_rows*column_index
    return WellGroup(container.all_wells(columnwise=True)[start:start+num_rows])
def breakup_dispense_column_volumes(column_volumes):
    """
    Ensures that the column/volume pairs passed to dispense are less than 2.5mL (and multiples of 20uL)
    """
    new_column_volumes = []
    for col_volume_pair in column_volumes:
        volume = col_volume_pair['volume'].to('microliter')
        # repeatedly split off roughly half (rounded up to a 20uL multiple)
        # until the remainder fits under the 2.5mL dispense limit
        while volume>ml(2.5):
            volume_to_breakup_ul = round_up(volume.magnitude/2,20)
            new_column_volumes.append({'column':col_volume_pair['column'], 'volume':ul(volume_to_breakup_ul)})
            volume-=ul(volume_to_breakup_ul)
        new_column_volumes.append({'column':col_volume_pair['column'], 'volume':volume})
    return new_column_volumes
def round_volume(volume, ndigits):
    """
    Converts to microliters and performs rounding
    """
    magnitude = volume.to('microliter').magnitude
    return ul(round(magnitude, ndigits))
def ceil_volume(volume, ndigits=0):
    """
    Converts to microliters and performs ceil at ``ndigits`` decimal places

    E.g. ndigits=2 ceils to the liquid handler's 0.01 uL precision.
    """
    magnitude = volume.to('microliter').magnitude
    power_multiple = math.pow(10, ndigits)
    # use the float multiplier directly: the previous int() truncation made
    # negative ndigits collapse to zero (int(0.1) == 0)
    return ul(math.ceil(magnitude * power_multiple) / power_multiple)
def convert_mass_to_volume(mass_to_convert,dna_well):
    """Convert a DNA mass to the aspiration volume from ``dna_well``.

    Uses the well's 'Concentration (DNA)' property (e.g. "50:nanogram/microliter").
    """
    # the concentration property lives on wells initialized from inventory
    if not dna_well.properties:
        init_inventory_well(dna_well)
    mass_to_convert_ng = mass_to_convert.to('nanogram')
    dna_concentration_ng_per_ul = Unit(dna_well.properties['Concentration (DNA)']).to('nanogram/microliter')
    dna_concentration_ul_per_ng = (1/dna_concentration_ng_per_ul).to('microliter/nanogram')
    #liquid handler has .01 ul precision
    return ceil_volume(mass_to_convert_ng * dna_concentration_ul_per_ng,2)
def convert_moles_to_volume(moles_to_convert,dna_well):
    """Convert an amount of DNA (moles) to the aspiration volume from ``dna_well``.

    Requires the well's 'dna_length' property (in bp).
    """
    # ~649 ng/pmol for 1 kb of double-stranded DNA
    ng_per_pmol_1kb = Unit(649,'nanogram/picomole')
    dna_length = int(dna_well.properties['dna_length'])
    moles_to_convert_pmol = moles_to_convert.to('picomole')
    # scale the per-1kb mass by the actual length in kb
    mass_to_convert_ng = moles_to_convert_pmol * ng_per_pmol_1kb * dna_length / 1000.0
    return convert_mass_to_volume(mass_to_convert_ng, dna_well)
def convert_stamp_shape_to_wells(source_origin, dest_origin, shape=dict(rows=8,
                                 columns=12), one_source=False):
    """Expand stamp (multi-channel) origins and shapes into explicit well lists.

    Returns (all_source_wells, all_dest_wells) covering every well touched by
    the rectangular stamp shape(s).  Shapes must be complete rows or columns
    of a 96-tip layout.
    NOTE(review): the mutable dict default is safe here only because `shape`
    is reassigned, never mutated.
    """
    # Support existing transfer syntax by converting a container to all
    # quadrants of that container
    if isinstance(source_origin, Container):
        source_plate = source_origin
        source_plate_type = source_plate.container_type
        if source_plate_type.well_count == 96:
            source_origin = source_plate.well(0)
        elif source_plate_type.well_count == 384:
            # the four interleaved 96-well quadrants of a 384 plate
            source_origin = source_plate.wells([0, 1, 24, 25])
        else:
            raise TypeError("Invalid source_origin type given. If "
                            "source_origin is a container, it must be a "
                            "container with 96 or 384 wells.")
    if isinstance(dest_origin, Container):
        dest_plate = dest_origin
        dest_plate_type = dest_plate.container_type
        if dest_plate_type.well_count == 96:
            dest_origin = dest_plate.well(0)
        elif dest_plate_type.well_count == 384:
            dest_origin = dest_plate.wells([0, 1, 24, 25])
        else:
            raise TypeError("Invalid dest_origin type given. If "
                            "dest_origin is a container, it must be a "
                            "container with 96 or 384 wells.")

    # Initialize input parameters
    source = WellGroup(source_origin)
    dest = WellGroup(dest_origin)
    opts = []  # list of transfers
    oshp = []  # list of shapes
    osta = []  # list of stamp_types
    len_source = len(source.wells)
    len_dest = len(dest.wells)

    # Auto-generate well-group if only 1 well specified for either source
    # or destination if one_source=False
    if not one_source:
        if len_dest > 1 and len_source == 1:
            source = WellGroup(source.wells * len_dest)
            len_source = len(source.wells)
        if len_dest == 1 and len_source > 1:
            dest = WellGroup(dest.wells * len_source)
            len_dest = len(dest.wells)
        if len_source != len_dest:
            raise RuntimeError("To transfer liquid from one origin or "
                               "multiple origins containing the same "
                               "source, set one_source to True. To "
                               "transfer from multiple origins to a "
                               "single destination well, specify only one "
                               "destination well. Otherwise, you must "
                               "specify the same number of source and "
                               "destination wells to do a one-to-one "
                               "transfer.")

    # Auto-generate list from single shape, check if list length matches
    if isinstance(shape, dict):
        if len_dest == 1 and not one_source:
            shape = [shape] * len_source
        else:
            shape = [shape] * len_dest
    elif isinstance(shape, list) and len(shape) == len_dest:
        shape = shape
    else:
        raise RuntimeError("Unless the same shape is being used for all "
                           "transfers, each destination well must have a "
                           "corresponding shape in the form of a list.")

    # Read through shape list and generate stamp_type, rows, and columns
    stamp_type = []
    rows = []
    columns = []
    for s in shape:
        # Check and load rows/columns from given shape
        if "rows" not in s or "columns" not in s:
            raise TypeError("Invalid input shape given. Rows and columns "
                            "of a rectangle has to be defined.")
        r = s["rows"]
        c = s["columns"]
        rows.append(r)
        columns.append(c)
        # Check on complete rows/columns (assumption: tip_layout=96)
        if c == 12 and r == 8:
            stamp_type.append("full")
        elif c == 12:
            stamp_type.append("row")
        elif r == 8:
            stamp_type.append("col")
        else:
            raise ValueError("Only complete rows or columns are allowed.")

    all_source_wells = []
    all_dest_wells = []
    for w, c, r, st in list(zip(source.wells, columns, rows, stamp_type)):
        columnWise = False
        if st == "col":
            columnWise = True
        # 384-well (24-column) plates: pick every other well so the 96-tip
        # head lands on one interleaved quadrant
        if w.container.container_type.col_count == 24:
            if columnWise:
                source_wells = [w.container.wells_from(
                    w, c * r * 4, columnWise)[x] for x in range(c * r * 4) if (x % 2) == (x // 16) % 2 == 0]
            else:
                source_wells = [w.container.wells_from(
                    w, c * r * 4, columnWise)[x] for x in range(c * r * 4) if (x % 2) == (x // 24) % 2 == 0]
        else:
            source_wells = w.container.wells_from(
                w, c * r, columnWise)
        all_source_wells += source_wells
    for w, c, r, st in list(zip(dest.wells, columns, rows, stamp_type)):
        columnWise = False
        if st == "col":
            columnWise = True
        if w.container.container_type.col_count == 24:
            if columnWise:
                dest_wells = [w.container.wells_from(
                    w, c * r * 4, columnWise)[x] for x in range(c * r * 4) if (x % 2) == (x // 16) % 2 == 0]
            else:
                dest_wells = [w.container.wells_from(
                    w, c * r * 4, columnWise)[x] for x in range(c * r * 4) if (x % 2) == (x // 24) % 2 == 0]
        else:
            dest_wells = w.container.wells_from(
                w, c * r, columnWise)
        all_dest_wells += dest_wells
    return all_source_wells, all_dest_wells
def calculate_dilution_volume(start_concentration, final_concentration, final_volume):
    """C1*V1 = C2*V2: return the stock volume needed for the dilution, in microliters."""
    stock_volume = final_concentration * final_volume / start_concentration
    return stock_volume.to('microliter')
# number (up to 2 decimals) immediately followed by a unit token, e.g. "10uM",
# "1.25ng/ul"; raw string avoids invalid-escape warnings for \d and \/
UNIT_RE = re.compile(r'^(\d+\.?\d{0,2})([\w\/]+)$')

def convert_string_to_unit(s):
    """Handles malformated strings like 10uM

    Inserts the ':' separator Unit expects ("10uM" -> "10:uM"); well-formed
    strings pass through untouched.
    """
    if ":" not in s:
        match = UNIT_RE.match(s)
        if match:
            s = "%s:%s"%match.groups()
    return Unit(s)
def get_diluent_volume(starting_concentration, dilutant_volume, desired_concentration):
    """Volume of diluent to add to ``dilutant_volume`` of stock to reach the desired concentration."""
    if desired_concentration > starting_concentration:
        raise Exception('starting concentration must be higher than desired concentration in a dilution')
    # fold-dilution required (dimensionless after converting to uM)
    dilution_multiple = (starting_concentration.to('uM') / desired_concentration).magnitude
    # rounded to the liquid handler's 0.01 uL precision
    diluent_volume = round_volume(dilutant_volume / (dilution_multiple - 1),2)
    return diluent_volume
class InvalidContainerStateException(Exception):
    """Raised when a container is in a state an operation cannot handle."""
    pass
|
scottbecker/delve_tx_public
|
src/transcriptic_tools/utils.py
|
Python
|
mit
| 26,985
|
[
"Brian"
] |
3ca814cbee78cf7493f4bd7a44e756c9df4f45eec0cceed46fb51a25b54a4ac7
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ESMF, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_ESMF(ConfigureMake):
    """Support for building/installing ESMF."""

    def configure_step(self):
        """Custom configuration procedure for ESMF through environment variables."""
        # installation layout
        for key, value in [
            ('ESMF_DIR', self.cfg['start_dir']),
            ('ESMF_INSTALL_PREFIX', self.installdir),
            ('ESMF_INSTALL_BINDIR', 'bin'),
            ('ESMF_INSTALL_LIBDIR', 'lib'),
            ('ESMF_INSTALL_MODDIR', 'mod'),
        ]:
            env.setvar(key, value)

        # specify compiler
        comp_family = self.toolchain.comp_family()
        compiler = 'gfortran' if comp_family == toolchain.GCC else comp_family.lower()
        env.setvar('ESMF_COMPILER', compiler)

        # specify MPI communications library
        mpi_family = self.toolchain.mpi_family()
        if mpi_family in (toolchain.MPICH, toolchain.QLOGICMPI):
            # MPICH family for MPICH v3.x, which is MPICH2 compatible
            comm = 'mpich2'
        else:
            comm = mpi_family.lower()
        env.setvar('ESMF_COMM', comm)

        # specify decent LAPACK lib
        env.setvar('ESMF_LAPACK', 'user')
        env.setvar('ESMF_LAPACK_LIBS', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBLAPACK_MT')))

        # specify netCDF
        netcdf = get_software_root('netCDF')
        if netcdf:
            env.setvar('ESMF_NETCDF', 'user')
            netcdf_libs = ['-L%s/lib' % netcdf, '-lnetcdf']

            # Fortran
            netcdff = get_software_root('netCDF-Fortran')
            if netcdff:
                netcdf_libs = ["-L%s/lib" % netcdff] + netcdf_libs + ["-lnetcdff"]
            else:
                netcdf_libs.append('-lnetcdff')

            # C++
            netcdfcxx = get_software_root('netCDF-C++')
            if netcdfcxx:
                netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++"]
            else:
                netcdf_libs.append('-lnetcdf_c++')
            env.setvar('ESMF_NETCDF_LIBS', ' '.join(netcdf_libs))

        # 'make info' provides useful debug info
        run_cmd("make info", log_all=True, simple=True, log_ok=True)

    def sanity_check_step(self):
        """Custom sanity check for ESMF."""
        shlib_ext = get_shared_lib_ext()
        binaries = ['ESMF_Info', 'ESMF_InfoC', 'ESMF_RegridWeightGen', 'ESMF_WebServController']
        libraries = ['libesmf.a', 'libesmf.%s' % shlib_ext]
        custom_paths = {
            'files': [os.path.join('bin', b) for b in binaries] +
                     [os.path.join('lib', lib) for lib in libraries],
            'dirs': ['include', 'mod'],
        }
        super(EB_ESMF, self).sanity_check_step(custom_paths=custom_paths)
|
wpoely86/easybuild-easyblocks
|
easybuild/easyblocks/e/esmf.py
|
Python
|
gpl-2.0
| 4,281
|
[
"NetCDF"
] |
94ac9b9927cb868ece1c10befb92b44295bdd05754450be76dcfd7cccc3e5500
|
import json
import frappe
from erpnext.demo.domains import data
def setup_data():
	# Seed the Retail demo domain. Items must be created before their
	# prices; the commit persists both before the cache is cleared.
	setup_item()
	setup_item_price()
	frappe.db.commit()
	frappe.clear_cache()
def setup_item():
	"""Create the demo Items belonging to the Retail domain from item.json."""
	# Use a context manager: the original opened the file without ever
	# closing it, leaking the handle.
	with open(frappe.get_app_path('erpnext', 'demo', 'data', 'item.json')) as f:
		items = json.loads(f.read())
	for i in items:
		# Only items tagged for the Retail domain are relevant here.
		if i.get("domain") != "Retail":
			continue
		item = frappe.new_doc('Item')
		item.update(i)
		if hasattr(item, 'item_defaults') and item.item_defaults[0].default_warehouse:
			item.item_defaults[0].company = data.get("Retail").get('company_name')
			# Resolve the human-readable warehouse name to its document name.
			warehouse = frappe.get_all('Warehouse', filters={'warehouse_name': item.item_defaults[0].default_warehouse}, limit=1)
			if warehouse:
				item.item_defaults[0].default_warehouse = warehouse[0].name
		item.insert()
def setup_item_price():
	"""Recreate the demo Item Price records for the buying and selling lists."""
	frappe.db.sql("delete from `tabItem Price`")
	standard_selling = {
		"OnePlus 6": 579,
		"OnePlus 6T": 600,
		"Xiaomi Poco F1": 300,
		"Iphone XS": 999,
		"Samsung Galaxy S9": 720,
		"Sony Bluetooth Headphone": 99,
		"Xiaomi Phone Repair": 10,
		"Samsung Phone Repair": 20,
		"OnePlus Phone Repair": 15,
		"Apple Phone Repair": 30,
	}
	standard_buying = {
		"OnePlus 6": 300,
		"OnePlus 6T": 350,
		"Xiaomi Poco F1": 200,
		"Iphone XS": 600,
		"Samsung Galaxy S9": 500,
		"Sony Bluetooth Headphone": 69
	}
	# Iterate explicit (name, rates) pairs instead of the fragile
	# locals().get(price_list) lookup the original used.
	for price_list, rates in (("standard_buying", standard_buying),
			("standard_selling", standard_selling)):
		for item, rate in rates.items():
			frappe.get_doc({
				"doctype": "Item Price",
				"price_list": price_list.replace("_", " ").title(),
				"item_code": item,
				"selling": 1 if price_list=="standard_selling" else 0,
				"buying": 1 if price_list=="standard_buying" else 0,
				"price_list_rate": rate,
				"currency": "USD"
			}).insert()
|
mhbu50/erpnext
|
erpnext/demo/setup/retail.py
|
Python
|
gpl-3.0
| 1,720
|
[
"Galaxy"
] |
72b2bf54c3241f5d59f91c72da7b55ecf7a6b2463f532c5139d0304c60dc919d
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Copy an AST tree, discarding annotations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
class CleanCopier(gast.NodeVisitor):
  """Copies AST nodes.
  The copied nodes will ignore almost all fields that are prefixed by '__'.
  Exceptions make some annotations.
  """

  # TODO(mdan): Parametrize which annotations get carried over.

  def generic_visit(self, node):
    kept_fields = {}
    for field_name in node._fields:
      # Dunder-prefixed and absent fields are not copied.
      if field_name.startswith('__') or not hasattr(node, field_name):
        continue
      value = getattr(node, field_name)
      if isinstance(value, list):
        value = [self.generic_visit(item) for item in value]
      elif isinstance(value, tuple):
        value = tuple(self.generic_visit(item) for item in value)
      elif isinstance(value, (gast.AST, ast.AST)):
        value = self.generic_visit(value)
      else:
        # Anything else is treated as a value type and shared by reference.
        pass
      kept_fields[field_name] = value
    copied = type(node)(**kept_fields)
    # Preserve the SKIP_PROCESSING annotation, if present.
    if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
      anno.setanno(copied, anno.Basic.SKIP_PROCESSING, True)
    return copied
def copy_clean(node):
  """Returns a deep copy of node, discarding most annotations.
  Accepts a single node, or a list/tuple of nodes, and preserves the
  container type of the input.
  """
  copier = CleanCopier()
  if isinstance(node, (list, tuple)):
    copies = [copier.visit(n) for n in node]
    return copies if isinstance(node, list) else tuple(copies)
  return copier.visit(node)
class SymbolRenamer(gast.NodeTransformer):
  """Transformer that renames symbols (by qualified name) to simple names."""

  def __init__(self, name_map):
    self.name_map = name_map

  def _rename_if_mapped(self, node):
    # Replace the whole node by a plain Name when its QN is in the map.
    qn = anno.getanno(node, anno.Basic.QN)
    if qn not in self.name_map:
      return self.generic_visit(node)
    return gast.Name(str(self.name_map[qn]), node.ctx, None)

  def visit_Name(self, node):
    return self._rename_if_mapped(node)

  def visit_Attribute(self, node):
    if anno.hasanno(node, anno.Basic.QN):
      return self._rename_if_mapped(node)
    # Attributes of dynamic objects will not have a QN.
    return self.generic_visit(node)
def rename_symbols(node, name_map):
  """Renames symbols in node per name_map; accepts a node or list/tuple."""
  renamer = SymbolRenamer(name_map)
  if isinstance(node, (list, tuple)):
    renamed = [renamer.visit(n) for n in node]
    return renamed if isinstance(node, list) else tuple(renamed)
  return renamer.visit(node)
def keywords_to_dict(keywords):
  """Builds a gast.Dict node out of a list of keyword argument nodes."""
  keys = [gast.Str(kw.arg) for kw in keywords]
  values = [kw.value for kw in keywords]
  return gast.Dict(keys=keys, values=values)
class PatternMatcher(gast.NodeVisitor):
  """Matches a node against a pattern represented by a node.

  The pattern may contain wildcards represented by the symbol '_'.
  """
  def __init__(self, pattern):
    # Current (sub)pattern the visited node is being compared against.
    self.pattern = pattern
    # Stack of parent patterns, pushed/popped around recursive comparisons.
    self.pattern_stack = []
    # Result flag; flipped to False permanently on the first mismatch.
    self.matches = True
  def compare_and_visit(self, node, pattern):
    # Recurse into a child node with the matching child pattern, restoring
    # the parent pattern afterwards.
    self.pattern_stack.append(self.pattern)
    self.pattern = pattern
    self.generic_visit(node)
    self.pattern = self.pattern_stack.pop()
  def no_match(self):
    # Record the failure; returns False so callers can `return self.no_match()`.
    self.matches = False
    return False
  def is_wildcard(self, p):
    # A wildcard is the name (or bare string) '_', possibly wrapped in a
    # single-element list/tuple.
    if isinstance(p, (list, tuple)) and len(p) == 1:
      p, = p
    if isinstance(p, gast.Name) and p.id == '_':
      return True
    if p == '_':
      return True
    return False
  def generic_visit(self, node):
    # Short-circuit once a mismatch has been recorded.
    if not self.matches:
      return
    pattern = self.pattern
    for f in node._fields:
      if f.startswith('__'):
        continue
      if not hasattr(node, f):
        # Field absent on the node: mismatch only if the pattern carries a
        # truthy value for it.
        if hasattr(pattern, f) and getattr(pattern, f):
          return self.no_match()
        else:
          continue
      if not hasattr(pattern, f):
        return self.no_match()
      v = getattr(node, f)
      p = getattr(pattern, f)
      if self.is_wildcard(p):
        continue
      if isinstance(v, (list, tuple)):
        # Sequences must match element-wise, same length.
        if not isinstance(p, (list, tuple)) or len(v) != len(p):
          return self.no_match()
        for v_item, p_item in zip(v, p):
          self.compare_and_visit(v_item, p_item)
      elif isinstance(v, (gast.AST, ast.AST)):
        # Subtype matches are accepted in either direction.
        if not isinstance(v, type(p)) and not isinstance(p, type(v)):
          return self.no_match()
        self.compare_and_visit(v, p)
      else:
        # Assume everything else is a value type.
        if v != p:
          return self.no_match()
def matches(node, pattern):
  """Returns True if node matches pattern (a node, or a source string)."""
  if isinstance(pattern, str):
    pattern = parser.parse_expression(pattern)
  pattern_matcher = PatternMatcher(pattern)
  pattern_matcher.visit(node)
  return pattern_matcher.matches
|
drpngx/tensorflow
|
tensorflow/contrib/autograph/pyct/ast_util.py
|
Python
|
apache-2.0
| 5,240
|
[
"VisIt"
] |
a4b20ea340903174ef7b471d781cbddfeed7bd9f8e6cb3e030aad51b6304e854
|
# -*- coding: UTF-8 -*-
# Documentation available here:
# http://www.vtk.org/VTK/img/file-formats.pdf
import os
import copy
import tempfile
import numpy as np
from tractconverter.formats import header
from tractconverter.formats.header import Header as H
def readBinaryBytes(f, nbBytes, dtype):
    """Read `nbBytes` items of the given numpy dtype from file-like `f`."""
    raw = f.read(nbBytes * dtype.itemsize)
    return np.frombuffer(raw, dtype=dtype)
def readAsciiBytes(f, nbWords, dtype):
    """Read up to `nbWords` whitespace-separated words from text file `f`.

    Words are accumulated character by character; space and newline act as
    separators. Returns a numpy array of the words cast to `dtype`.
    Stops early at end-of-file (the original looped forever on a truncated
    file, since read(1) keeps returning "" at EOF).
    """
    words = []
    buff = ""
    while len(words) < nbWords:
        c = f.read(1)
        if c == "":
            # EOF: flush any partial word and stop instead of spinning.
            if len(buff) > 0:
                words.append(buff)
            break
        if c == " " or c == '\n':
            if len(buff) > 0:
                words.append(buff)
            buff = ""
        else:
            buff += c
    return np.array(' '.join(words).split(), dtype=dtype)
# We assume the file cursor points to the beginning of the file.
# We assume the file cursor points to the beginning of the file.
def checkIfBinary(f):
    """Return True if the VTK stream declares itself BINARY on line 3.

    The cursor is rewound to the start of the file before returning.
    """
    f.readline()  # Skip version
    f.readline()  # Skip description
    declared_format = f.readline().strip()  # BINARY or ASCII
    f.seek(0, 0)  # Reset cursor to beginning of the file.
    return declared_format.upper() == "BINARY"
def convertAsciiToBinary(original_filename):
    """Convert an ASCII VTK polydata file into a temporary binary VTK file.

    Returns the temporary file's name; the caller owns the file and must
    delete it (see VTK.cleanTempFile).
    NOTE(review): this function writes `str` objects to a binary-mode temp
    file, i.e. it is Python-2-only as written.
    """
    sections = get_sections(original_filename)
    f = open(original_filename, 'rb')
    # Skip the first header lines
    f.readline() # Version (not used)
    f.readline() # Description (not used)
    original_file_type = f.readline().strip() # Type of the file BINARY or ASCII.
    f.readline() # Data type (not used)
    if original_file_type.upper() != "ASCII":
        raise ValueError("BINARY file given to convertAsciiToBinary.")
    # Create a temporary file with a name. Delete is set to false to make sure
    # the file is not automatically deleted when closed.
    binary_file = tempfile.NamedTemporaryFile(delete=False)
    # Write header
    binary_file.write("# {0} DataFile Version {1}\n".format(VTK.MAGIC_NUMBER, VTK.VERSION))
    binary_file.write("converted from ASCII vtk by tractconverter\n")
    binary_file.write("BINARY\n")
    binary_file.write("DATASET POLYDATA\n")
    # Convert POINTS section from ASCII to binary
    f.seek(sections['POINTS'], os.SEEK_SET)
    line = f.readline() # POINTS n float
    nb_coordinates = int(line.split()[1]) * 3
    binary_file.write(line)
    # NOTE(review): nb_coordinates already includes the x3 factor (above), so
    # the extra "* 3" in this condition is redundant; only the sign matters.
    while nb_coordinates * 3 > 0:
        tokens = f.readline().split()
        #Skip empty lines
        if len(tokens) == 0:
            continue
        # Coordinates are written as big-endian float32.
        binary_file.write(np.array(tokens, dtype='>f4').tostring())
        nb_coordinates -= len(tokens)
    binary_file.write('\n')
    if 'LINES' in sections:
        # Convert LINES section from ASCII to binary
        f.seek(sections['LINES'], os.SEEK_SET)
        line = f.readline() # LINES n size
        nb_lines = int(line.split()[1])
        binary_file.write(line)
        while nb_lines > 0:
            tokens = f.readline().split()
            #Skip empty lines
            if len(tokens) == 0:
                continue
            #Write number of points in the line
            binary_file.write(np.array([tokens[0]], dtype='>i4').tostring())
            #Write indices of points in the line
            binary_file.write(np.array(tokens[1:], dtype='>i4').tostring())
            nb_lines -= 1
    # TODO: COLORS, SCALARS
    binary_file.close()
    f.close()
    return binary_file.name
POLYDATA_SECTIONS = ['POINTS', 'VERTICES', 'LINES', 'POLYGONS', 'TRIANGLE_STRIPS']


def get_sections(filename):
    """Map each POLYDATA section name found in `filename` to its byte offset.

    The file is scanned line by line in binary mode so offsets are exact.
    Works under both Python 2 and 3: the original used a Python-2 `print`
    statement and compared bytes against str, which breaks on Python 3.
    """
    sections_found = {}
    nb_read_bytes = 0
    with open(filename, 'rb') as f:
        for line in f:
            for section in POLYDATA_SECTIONS:
                # Encode the section name so the comparison stays bytes-vs-
                # bytes on Python 3 (and is a no-op str compare on Python 2).
                if line.upper().startswith(section.encode('ascii')):
                    if section in sections_found:
                        print("Warning multiple {0} sections!".format(section))
                    # Last occurrence wins, matching the original behaviour.
                    sections_found[section] = nb_read_bytes
            nb_read_bytes += len(line)
    return sections_found
class VTK:
    """Reader/writer for VTK polydata tractography files.

    Always reads binary data: ASCII inputs are first converted to a
    temporary binary file (see convertAsciiToBinary).
    NOTE(review): this class is Python-2-only as written (str written to
    binary files, integer `/` division in __iter__).
    """
    MAGIC_NUMBER = "vtk"
    VERSION = "3.0"
    # Number of streamlines read per chunk in __iter__.
    BUFFER = 10000
    # self.hdr
    # self.filename
    # self.endian
    # self.offset
    # self.FIBER_DELIMITER
    # self.END_DELIMITER
    #####
    # Static Methods
    ###
    @staticmethod
    def _check(filename):
        # A VTK file's first line contains the "vtk" magic number.
        f = open(filename, 'rb')
        magicNumber = f.readline().strip()
        f.close()
        return VTK.MAGIC_NUMBER in magicNumber
    @staticmethod
    def create(filename, hdr=None, anatFile=None):
        # Create a new (empty) VTK file on disk and return a VTK object
        # wrapping it, with the given (or an empty) header written out.
        f = open(filename, 'wb')
        f.write(VTK.MAGIC_NUMBER + "\n")
        f.close()
        if hdr is None:
            hdr = VTK.get_empty_header()
        else:
            hdr = copy.deepcopy(hdr)
        vtk = VTK(filename, load=False)
        vtk.hdr = hdr
        vtk.writeHeader()
        return vtk
    #####
    # Methods
    ###
    def __init__(self, filename, anatFile=None, load=True):
        if not VTK._check(filename):
            raise NameError("Not a VTK file.")
        self.filename = filename
        # Kept so cleanTempFile can tell whether filename was swapped for a
        # temporary binary conversion.
        self.original_filename = filename
        self.hdr = {}
        if load:
            self.hdr = header.get_header_from_anat(anatFile)
            self._load()
    def __del__(self):
        self.cleanTempFile()
    def _load(self):
        # Parse the header and record section offsets; converts ASCII input
        # to a temporary binary file first.
        f = open(self.filename, 'rb')
        #####
        # Read header
        ###
        info = f.readline().split()
        self.hdr[H.MAGIC_NUMBER] = info[1]
        self.hdr["version"] = info[-1]
        self.hdr["description"] = f.readline().strip()
        self.hdr["file_type"] = f.readline().strip()
        #####
        # If in ASCII format, create a temporary Binary file. This
        # will avoid lots of problems when reading.
        # We will always read a binary file, converted or not.
        #####
        if "BINARY" != self.hdr["file_type"].upper():
            f.close()
            binary_filename = convertAsciiToBinary(self.filename)
            self.filename = binary_filename
        self.sections = get_sections(self.filename)
        #TODO: Check number of scalars and properties
        self.hdr[H.NB_SCALARS_BY_POINT] = "N/A"
        self.hdr[H.NB_PROPERTIES_BY_TRACT] = "N/A"
        f = open(self.filename, 'rb')
        #####
        # Read header
        ###
        f.readline() # Version (not used)
        f.readline() # Description (not used)
        self.fileType = f.readline().strip() # Type of the file BINARY or ASCII.
        f.readline() # Data type (not used)
        #self.offset = f.tell() # Store offset to the beginning of data.
        f.seek(self.sections['POINTS'], os.SEEK_SET)
        self.hdr[H.NB_POINTS] = int(f.readline().split()[1]) # POINTS n float
        #self.offset_points = f.tell()
        #f.seek(self.hdr[H.NB_POINTS] * 3 * 4, 1) # Skip nb_points * 3 (x,y,z) * 4 bytes
        # Skip newline, to bring to the line containing the LINES marker.
        #f.readline()
        self.hdr[H.NB_FIBERS] = 0
        if 'LINES' in self.sections:
            f.seek(self.sections['LINES'], os.SEEK_SET)
            infos = f.readline().split() # LINES n size
            self.hdr[H.NB_FIBERS] = int(infos[1])
            #size = int(infos[2])
            #self.offset_lines = f.tell()
            #f.seek(size * 4, 1) # Skip nb_lines + nb_points * 4 bytes
        # TODO: Read infos about COLORS, SCALARS, ...
        f.close()
    @classmethod
    def get_empty_header(cls):
        # Fresh header with all counts zeroed.
        hdr = {}
        #Default values
        hdr[H.MAGIC_NUMBER] = cls.MAGIC_NUMBER
        hdr[H.NB_FIBERS] = 0
        hdr[H.NB_POINTS] = 0
        hdr[H.NB_SCALARS_BY_POINT] = 0
        hdr[H.NB_PROPERTIES_BY_TRACT] = 0
        return hdr
    def writeHeader(self):
        # Write the binary VTK header and zero-filled POINTS/LINES sections,
        # recording section offsets so __iadd__ can fill them in later.
        self.sections = {}
        f = open(self.filename, 'wb')
        f.write("# {0} DataFile Version {1}\n".format(VTK.MAGIC_NUMBER, VTK.VERSION))
        f.write("vtk comments\n")
        f.write("BINARY\n") # Support only binary file for saving.
        f.write("DATASET POLYDATA\n")
        # POINTS
        self.sections['POINTS'] = f.tell()
        f.write("POINTS {0} float\n".format(self.hdr[H.NB_POINTS]))
        self.sections['POINTS_start'] = f.tell()
        self.sections['POINTS_current'] = f.tell()
        #self.offset = f.tell()
        f.write(np.zeros((self.hdr[H.NB_POINTS], 3), dtype='>f4'))
        f.write('\n')
        # LINES
        if self.hdr[H.NB_FIBERS] > 0:
            self.sections['LINES'] = f.tell()
            size = self.hdr[H.NB_FIBERS] + self.hdr[H.NB_POINTS]
            f.write("LINES {0} {1}\n".format(self.hdr[H.NB_FIBERS], size))
            self.sections['LINES_current'] = f.tell()
            f.write(np.zeros(size, dtype='>i4'))
        # TODO: COLORS, SCALARS
        f.close()
    def cleanTempFile(self):
        # If the filenames differ, we converted an ASCII file to a binary file.
        # In this case, if the temporary binary file still exists, we need to clean up behind ourselves.
        if self.filename != self.original_filename and os.path.exists(self.filename):
            os.remove(self.filename)
        self.filename = self.original_filename
    def close(self):
        self.cleanTempFile()
        pass
    # TODO: make it really dynamic if possible (like trk and tck).
    def __iadd__(self, fibers):
        # Append streamlines in place: points are written at the current
        # POINTS offset, then per-fiber counts and point indices at the
        # current LINES offset.
        if len(fibers) == 0:
            return self
        f = open(self.filename, 'r+b')
        f.seek(self.sections['POINTS_current'], os.SEEK_SET)
        # Points already written so far: bytes / 3 coords / 4 bytes each.
        nb_points = (self.sections['POINTS_current'] - self.sections['POINTS_start']) // 3 // 4
        for fib in fibers:
            f.write(fib.astype('>f4').tostring())
        self.sections['POINTS_current'] = f.tell()
        f.seek(self.sections['LINES_current'], os.SEEK_SET)
        for fib in fibers:
            f.write(np.array([len(fib)], dtype='>i4').tostring())
            f.write(np.arange(nb_points, nb_points + len(fib), dtype='>i4').tostring())
            nb_points += len(fib)
        self.sections['LINES_current'] = f.tell()
        f.close()
        return self
    #####
    # Iterate through fibers
    # TODO: Use a buffer instead of reading one streamline at the time.
    ###
    def __iter__(self):
        # Yield streamlines as Nx3 point arrays, reading BUFFER fibers'
        # indices per chunk and then the covered span of points at once.
        if self.hdr[H.NB_FIBERS] == 0:
            return
        f = open(self.filename, 'rb')
        #Keep important positions in the file.
        f.seek(self.sections['POINTS'], os.SEEK_SET)
        f.readline()
        self.sections['POINTS_current'] = f.tell()
        f.seek(self.sections['LINES'], os.SEEK_SET)
        f.readline()
        self.sections['LINES_current'] = f.tell()
        for i in range(0, self.hdr[H.NB_FIBERS], self.BUFFER):
            f.seek(self.sections['LINES_current'], os.SEEK_SET) # Seek from beginning of the file
            # Read indices of next streamline
            nbIdx = []
            ptsIdx = []
            for k in range(min(self.hdr[H.NB_FIBERS], i+self.BUFFER) - i):
                nbIdx.append(readBinaryBytes(f, 1, np.dtype('>i4'))[0])
                ptsIdx.append(readBinaryBytes(f, nbIdx[-1], np.dtype('>i4')))
            self.sections['LINES_current'] = f.tell()
            # Read points according to indices previously read
            startPos = np.min(ptsIdx[0]) * 3 # Minimum index * 3 (x,y,z)
            endPos = (np.max(ptsIdx[-1]) + 1) * 3 # After maximum index * 3 (x,y,z)
            f.seek(self.sections['POINTS_current'] + startPos * 4, os.SEEK_SET) # Seek from beginning of the file
            points = readBinaryBytes(f, endPos - startPos, np.dtype('>f4'))
            points = points.reshape([-1, 3]) # Matrix dimension: Nx3
            # TODO: Read COLORS, SCALARS, ...
            for pts_id in ptsIdx:
                # NOTE(review): `/3` is Python-2 integer division here.
                yield points[pts_id - startPos/3]
        f.close()
    def load_all(self):
        # TODO: make it more efficient, load everything in memory first
        # and to processing afterward.
        return [s for s in self]
    def __str__(self):
        # Human-readable summary of the parsed header.
        text = ""
        text += "MAGIC NUMBER: {0}".format(self.hdr[H.MAGIC_NUMBER])
        text += "\nv.{0}".format(self.hdr['version'])
        text += "\nDescription: '{0}'".format(self.hdr['description'])
        text += "\nFile type: {0}".format(self.hdr['file_type'])
        text += "\nnb_scalars: {0}".format(self.hdr[H.NB_SCALARS_BY_POINT])
        text += "\nnb_properties: {0}".format(self.hdr[H.NB_PROPERTIES_BY_TRACT])
        text += "\nn_count: {0}".format(self.hdr[H.NB_FIBERS])
        return text
|
MarcCote/tractconverter
|
tractconverter/formats/vtk.py
|
Python
|
bsd-3-clause
| 12,340
|
[
"VTK"
] |
bddfc3a0ba027f573a5f56266e339381023c98dcc6e80e0d58148eefc7ff88bf
|
#! /usr/bin/env python
"""
Module with frame/cube filtering functionalities.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, Valentin Christiaens'
__all__ = ['frame_filter_highpass',
'frame_filter_lowpass',
'frame_deconvolution',
'cube_filter_highpass',
'cube_filter_lowpass',
'cube_filter_iuwt']
import warnings
try:
import cv2
no_opencv = False
except ImportError:
msg = "Opencv python bindings are missing."
warnings.warn(msg, ImportWarning)
no_opencv = True
import numpy as np
from scipy.ndimage import median_filter
from skimage.restoration import richardson_lucy
from astropy.convolution import (convolve_fft, convolve, Gaussian2DKernel)
from astropy.convolution import interpolate_replace_nans as interp_nan
from astropy.stats import gaussian_fwhm_to_sigma
from .iuwt import iuwt_decomposition
from ..config import Progressbar
def cube_filter_iuwt(cube, coeff=5, rel_coeff=1, full_output=False):
    """
    Isotropic Undecimated Wavelet Transform filtering, frame by frame.

    Parameters
    ----------
    cube : numpy ndarray
        Input cube.
    coeff : int, optional
        Number of wavelet scales used in the decomposition.
    rel_coeff : int, optional
        Number of relevant coefficients, i.e. how many wavelet scales are
        summed to build each filtered frame. One or two scales are usually
        enough for filtering.
    full_output : bool, optional
        If True, also return a 4d cube holding the multiscale decomposition
        of every frame.

    Returns
    -------
    cubeout : numpy ndarray
        Output cube with the filtered frames. If full_output is True, it is
        returned together with a 4d cube containing the multiscale
        decomposition of each frame.
    """
    cubeout = np.zeros_like(cube)
    cube_coeff = np.zeros((cube.shape[0], coeff, cube.shape[1], cube.shape[2]))
    print('Decomposing frames with the Isotropic Undecimated Wavelet Transform')
    for fr in Progressbar(range(cube.shape[0])):
        cube_coeff[fr] = iuwt_decomposition(cube[fr], coeff, store_smoothed=False)
        # Keep only the first rel_coeff scales for the filtered frame.
        for scale in range(rel_coeff):
            cubeout[fr] += cube_coeff[fr][scale]
    if full_output:
        return cubeout, cube_coeff
    return cubeout
def cube_filter_highpass(array, mode='laplacian', verbose=True, **kwargs):
    """
    Apply ``frame_filter_highpass`` to every frame of a 3d or 4d cube.

    Parameters
    ----------
    array : numpy ndarray
        Input cube, 3d or 4d.
    mode : str, optional
        ``mode`` parameter forwarded to ``frame_filter_highpass``. Defaults
        to a Laplacian high-pass filter.
    verbose : bool, optional
        If ``True`` timing and progress bar are shown.
    **kwargs : dict
        Passed through to the ``frame_filter_highpass`` function.

    Returns
    -------
    filtered : numpy ndarray
        High-pass filtered cube.
    """
    filtered_cube = np.empty_like(array)
    if array.ndim == 3:
        for fr in Progressbar(range(array.shape[0]), verbose=verbose):
            filtered_cube[fr] = frame_filter_highpass(array[fr], mode=mode,
                                                      **kwargs)
    elif array.ndim == 4:
        # 4d: first axis is wavelength channel, second is frame index.
        for fr in Progressbar(range(array.shape[1]), verbose=verbose):
            for ch in range(array.shape[0]):
                filtered_cube[ch][fr] = frame_filter_highpass(array[ch][fr],
                                                              mode=mode,
                                                              **kwargs)
    else:
        raise TypeError('Input array is not a 3d or 4d cube')
    return filtered_cube
def fft(array):
    """
    2d discrete Fourier transform with low frequencies shifted to the centre.

    numpy's fft2 places the low frequencies at the corners; applying
    fftshift afterwards recentres them, which is the conventional layout
    when Fourier-transformed images are displayed or filtered radially.
    """
    return np.fft.fftshift(np.fft.fft2(array))
def ifft(array):
    """
    Inverse of ``fft``: undo the shift, invert the transform, keep the real
    part.

    Notes
    -----
    A real image corresponds to a symmetric spectrum; as long as the
    operations applied in Fourier space preserve that symmetry, the
    imaginary part of the inverse transform is (numerically) zero, so
    only the real part is returned.
    """
    return np.fft.ifft2(np.fft.ifftshift(array)).real
def frame_filter_highpass(array, mode, median_size=5, kernel_size=5,
                          fwhm_size=5, btw_cutoff=0.2, btw_order=2,
                          hann_cutoff=5, psf=None, conv_mode='conv',
                          mask=None):
    """
    High-pass filtering of input frame depending on parameter ``mode``.

    The filtered image properties will depend on the ``mode`` and the relevant
    parameters.

    Parameters
    ----------
    array : numpy ndarray
        Input array, 2d frame.
    mode : str
        Type of High-pass filtering.

        ``laplacian``
            applies a Laplacian filter with kernel size defined by
            ``kernel_size`` using the Opencv library (falls back to
            ``laplacian-conv`` if Opencv is not available).
        ``laplacian-conv``
            applies a Laplacian high-pass filter by defining a kernel (with
            ``kernel_size``) and using the ``convolve_fft`` Astropy function.
        ``median-subt``
            subtracts a median low-pass filtered version of the image.
        ``gauss-subt``
            subtracts a Gaussian low-pass filtered version of the image.
        ``psf-subt``
            subtracts a PSF-convolved low-pass version of the image.
        ``fourier-butter``
            applies a high-pass 2D Butterworth filter in Fourier domain.
        ``hann``
            uses a Hann window.

    median_size : int, optional
        Size of the median box for the ``median-subt`` filter.
    kernel_size : int, optional
        Size of the Laplacian kernel used in ``laplacian`` mode. It must be
        a positive odd integer value.
    fwhm_size : float, optional
        Size of the Gaussian kernel used in ``gauss-subt`` mode.
    btw_cutoff : float, optional
        Frequency cutoff for low-pass 2d Butterworth filter used in
        ``fourier-butter`` mode.
    btw_order : int, optional
        Order of low-pass 2d Butterworth filter used in ``fourier-butter`` mode.
    hann_cutoff : float
        Frequency cutoff for the ``hann`` mode.
    psf: numpy ndarray, optional
        Input normalised and centered psf, 2d frame. Should be provided if
        mode is set to 'psf-subt'.
    conv_mode : {'conv', 'convfft'}, str optional
        'conv' uses the multidimensional gaussian filter from scipy.ndimage and
        'convfft' uses the fft convolution with a 2d Gaussian kernel.
    mask: numpy ndarray, optional
        Binary mask indicating where the low-pass filtered image should be
        interpolated with astropy.convolution. This option can be useful if the
        low-pass filtered image is aimed to capture low-spatial frequency sky
        signal, while avoiding a stellar halo (set to one in the binary mask).
        Note: only works with Gaussian kernel or PSF convolution.

    Returns
    -------
    filtered : numpy ndarray
        High-pass filtered image.

    """
    def butter2d_lp(size, cutoff, n=3):
        """
        Create low-pass 2D Butterworth filter.
        Function from PsychoPy library, credits to Jonathan Peirce, 2010

        Parameters
        ----------
        size : tuple
            size of the filter
        cutoff : float
            relative cutoff frequency of the filter (0 - 1.0)
        n : int, optional
            order of the filter, the higher n is the sharper
            the transition is.

        Returns
        -------
        numpy ndarray
            filter kernel in 2D centered
        """
        if not 0 < cutoff <= 1:
            raise ValueError('Cutoff frequency must be between 0 and 1.')
        if not isinstance(n, int):
            raise ValueError('n must be an integer >= 1.')
        rows, cols = size
        x = np.linspace(-0.5, 0.5, cols) * cols
        y = np.linspace(-0.5, 0.5, rows) * rows
        # An array with every pixel = radius relative to center
        radius = np.sqrt((x**2)[np.newaxis] + (y**2)[:, np.newaxis])
        # The filter
        f = 1 / (1 + (radius / cutoff)**(2*n))
        return f

    def round_away(x):
        """
        Round to the *nearest* integer, half-away-from-zero.

        Parameters
        ----------
        x : array-like

        Returns
        -------
        r_rounded : array-like (float)

        Notes
        -----
        IDL ``ROUND`` rounds to the *nearest* integer (commercial rounding),
        unlike numpy's round/rint, which round to the nearest *even*
        value (half-to-even, financial rounding) as defined in IEEE-754
        standard.
        """
        return np.trunc(x + np.copysign(0.5, x))

    # --------------------------------------------------------------------------
    if array.ndim != 2:
        raise TypeError("Input array is not a frame or 2d array.")

    if mask is not None and mode not in ('psf-subt', 'gauss-subt'):
        msg = "Masking option only available for gauss-subt and psf-subt modes"
        raise TypeError(msg)

    if mode == 'laplacian':
        # Applying a Laplacian high-pass kernel
        if kernel_size % 2 == 0 or kernel_size < 0:
            raise ValueError("Kernel size must be an odd and positive value.")
        if no_opencv:
            # BUGFIX: the original tested `if not no_opencv`, built this
            # warning message without ever issuing it, and then called cv2
            # unconditionally — crashing with NameError when opencv was
            # missing. Warn and fall back to the convolution-based filter.
            msg = "Opencv bindings are missing. Trying a convolution with a "
            msg += "Laplacian kernel instead."
            warnings.warn(msg, RuntimeWarning)
            filtered = frame_filter_highpass(array, 'laplacian-conv',
                                             kernel_size=kernel_size)
        else:
            filtered = cv2.Laplacian(-array, cv2.CV_32F, ksize=kernel_size)
    elif mode == 'laplacian-conv':
        # Applying a Laplacian high-pass kernel defining a kernel and using
        # the convolve_fft Astropy function
        kernel3 = np.array([[-1, -1, -1],
                            [-1, 8, -1],
                            [-1, -1, -1]])
        kernel5 = np.array([[-4, -1, 0, -1, -4],
                            [-1, 2, 3, 2, -1],
                            [0, 3, 4, 3, 0],
                            [-1, 2, 3, 2, -1],
                            [-4, -1, 0, -1, -4]])
        kernel7 = np.array([[-10, -5, -2, -1, -2, -5, -10],
                            [-5, 0, 3, 4, 3, 0, -5],
                            [-2, 3, 6, 7, 6, 3, -2],
                            [-1, 4, 7, 8, 7, 4, -1],
                            [-2, 3, 6, 7, 6, 3, -2],
                            [-5, 0, 3, 4, 3, 0, -5],
                            [-10, -5, -2, -1, -2, -5, -10]])
        if kernel_size == 3:
            kernel = kernel3
        elif kernel_size == 5:
            kernel = kernel5
        elif kernel_size == 7:
            kernel = kernel7
        else:
            raise ValueError('Kernel size must be either 3, 5 or 7.')
        filtered = convolve_fft(array, kernel, normalize_kernel=False,
                                nan_treatment='fill')
    elif mode == 'median-subt':
        # Subtracting the low_pass filtered (median) image from the image itself
        medianed = frame_filter_lowpass(array, 'median',
                                        median_size=median_size)
        filtered = array - medianed
    elif mode == 'gauss-subt':
        # Subtracting the low_pass filtered (Gaussian) image from the image itself
        gaussed = frame_filter_lowpass(array, 'gauss', fwhm_size=fwhm_size,
                                       conv_mode=conv_mode, mask=mask)
        filtered = array - gaussed
    elif mode == 'psf-subt':
        if psf is None:
            raise TypeError("psf should be provided for psf-subt mode")
        # Subtracting the low_pass filtered (PSF-convolved) image from the image itself
        psfed = frame_filter_lowpass(array, 'psf', psf=psf, mask=mask)
        filtered = array - psfed
    elif mode == 'fourier-butter':
        # Designs an n-th order high-pass 2D Butterworth filter
        filt = 1 - butter2d_lp(array.shape, cutoff=btw_cutoff, n=btw_order)
        array_fft = fft(array)
        fft_new = array_fft * filt
        filtered = ifft(fft_new)
    elif mode == 'hann':
        # TODO: this code could be shortened using np.convolve
        # see http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
        # create a Hanning profile window cut at the chosen frequency:
        npix = array.shape[0]
        cutoff = npix/2 * hann_cutoff
        cutoff_inside = round_away(np.minimum(cutoff, (npix/2 - 1))).astype(int)
        winsize = 2*cutoff_inside + 1
        win1d = np.hanning(winsize)
        win = 1 - np.outer(win1d, win1d)
        array_fft = fft(array)
        # remove high spatial frequency along the Hann profile:
        array_fft[npix//2 - cutoff_inside: npix//2 + cutoff_inside + 1,
                  npix//2 - cutoff_inside: npix//2 + cutoff_inside + 1] *= win
        filtered = ifft(array_fft)
    else:
        raise TypeError('Mode not recognized.')
    return filtered
def frame_filter_lowpass(array, mode='gauss', median_size=5, fwhm_size=5,
                         conv_mode='convfft', kernel_sz=None, psf=None,
                         mask=None, iterate=True, half_res_y=False, **kwargs):
    """
    Low-pass filtering of input frame depending on parameter ``mode``.

    Parameters
    ----------
    array : numpy ndarray
        Input array, 2d frame.
    mode : {'median', 'gauss', 'psf'}, str optional
        Type of low-pass filtering.
    median_size : int, optional
        Size of the median box for filtering the low-pass median filter.
    fwhm_size : float, optional
        Size of the Gaussian kernel for the low-pass Gaussian filter.
    conv_mode : {'conv', 'convfft'}, str optional
        'conv' uses the multidimensional gaussian filter from scipy.ndimage and
        'convfft' uses the fft convolution with a 2d Gaussian kernel.
    kernel_sz: int or None, optional
        Size of the kernel in pixels for 2D Gaussian and Moffat convolutions.
        If None, astropy.convolution will automatically consider 8*radius
        kernel sizes.
    psf: numpy ndarray, optional
        Input normalised and centered psf, 2d frame. Should be provided if
        mode is set to 'psf'.
    mask: numpy ndarray, optional
        Binary mask indicating where the low-pass filtered image should be
        interpolated with astropy.convolution. This option can be useful if the
        low-pass filtered image is aimed to capture low-spatial frequency sky
        signal, while avoiding a stellar halo (set to one in the binary mask).
        Note: only works with Gaussian kernel or PSF convolution.
    iterate: bool, opt
        If the first convolution leaves nans, whether to continue replacing
        nans by interpolation until they are all replaced.
    half_res_y: bool, {True,False}, optional
        Whether the input data has only half the angular resolution vertically
        compared to horizontally (e.g. the case for some IFUs); in other words
        there are always 2 rows of pixels with exactly the same values.
        If so, the kernel will also be squashed vertically by a factor 2.
        Only used if mode is 'gauss'.
    **kwargs : dict
        Passed through to the astropy.convolution.convolve or convolve_fft
        function.

    Returns
    -------
    filtered : numpy ndarray
        Low-pass filtered image.

    Raises
    ------
    TypeError
        If the input is not 2d, if a mask is combined with the median mode or
        has the wrong shape, or if ``mode``/``conv_mode`` is not recognized.
    ValueError
        If ``median_size`` is not an integer.
    """
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array.')
    if not isinstance(median_size, int):
        raise ValueError('`Median_size` must be integer')
    if mask is not None:
        # BUG FIX: previously the median-mode message was assigned to a local
        # variable but never raised, so the mask was silently ignored (and a
        # shape mismatch could raise the wrong message). Both cases now raise.
        if mode == 'median':
            raise TypeError("Masking not available for median filter")
        if mask.shape != array.shape:
            raise TypeError("Mask dimensions should be the same as array")

    if mode == 'median':
        # creating the low_pass filtered (median) image
        filtered = median_filter(array, median_size, mode='nearest')
    elif mode == 'gauss':
        # 2d Gaussian filter
        sigma = fwhm_size * gaussian_fwhm_to_sigma
        kernel_sz_y = kernel_sz
        if half_res_y:
            # squash the kernel vertically by a factor ~2 (see docstring)
            sigma_y = max(1, sigma // 2)
            if kernel_sz is not None:
                kernel_sz_y = kernel_sz // 2
                if kernel_sz_y % 2 != kernel_sz % 2:
                    # keep the same parity as the horizontal kernel size
                    kernel_sz_y += 1
        else:
            sigma_y = sigma

        def _gauss_kernel():
            # single construction point for the (possibly anisotropic) kernel,
            # shared by both convolution branches
            return Gaussian2DKernel(x_stddev=sigma, y_stddev=sigma_y,
                                    x_size=kernel_sz, y_size=kernel_sz_y)

        if conv_mode == 'conv':
            filtered = convolve(array, _gauss_kernel(), mask=mask, **kwargs)
            if iterate and np.sum(np.isnan(filtered)) > 0:
                filtered = interp_nan(filtered, _gauss_kernel(),
                                      convolve=convolve)
        elif conv_mode == 'convfft':
            # FFT Convolution with a 2d gaussian kernel created with Astropy.
            filtered = convolve_fft(array, _gauss_kernel(), mask=mask,
                                    **kwargs)
            if iterate and np.sum(np.isnan(filtered)) > 0:
                filtered = interp_nan(filtered, _gauss_kernel(),
                                      convolve=convolve_fft, **kwargs)
        else:
            raise TypeError('2d Gaussian filter mode not recognized')
    elif mode == 'psf':
        if psf is None:
            raise TypeError('psf should be provided for convolution')
        elif psf.ndim != 2:
            raise TypeError('Input psf is not a frame or 2d array.')
        if psf.shape[-1] > array.shape[-1]:
            raise TypeError('Input psf is larger than input array. Crop.')
        # psf convolution
        if conv_mode == 'conv':
            filtered = convolve(array, psf, mask=mask, **kwargs)
            if iterate and np.sum(np.isnan(filtered)) > 0:
                filtered = interp_nan(filtered, psf, convolve=convolve,
                                      **kwargs)
        elif conv_mode == 'convfft':
            filtered = convolve_fft(array, psf, mask=mask, **kwargs)
            if iterate and np.sum(np.isnan(filtered)) > 0:
                filtered = interp_nan(filtered, psf, convolve=convolve_fft,
                                      **kwargs)
    else:
        raise TypeError('Low-pass filter mode not recognized')

    return filtered
def cube_filter_lowpass(array, mode='gauss', median_size=5, fwhm_size=5,
                        conv_mode='conv', kernel_sz=None, verbose=True,
                        psf=None, mask=None, iterate=True, **kwargs):
    """
    Apply ``frame_filter_lowpass`` to the frames of a 3d or 4d cube.

    Parameters
    ----------
    array : numpy ndarray
        Input cube, 3d or 4d.
    mode : str, optional
        See the documentation of the ``frame_filter_lowpass`` function.
    median_size : int, optional
        See the documentation of the ``frame_filter_lowpass`` function.
    fwhm_size : float, optional
        See the documentation of the ``frame_filter_lowpass`` function.
    conv_mode : str, optional
        See the documentation of the ``frame_filter_lowpass`` function.
    kernel_sz: int, optional
        See the documentation of the ``frame_filter_lowpass`` function.
    verbose : bool, optional
        If True timing and progress bar are shown.
    psf: numpy ndarray, optional
        Input normalised and centered psf, 2d frame. Should be provided if
        mode is set to 'psf'.
    mask: numpy ndarray, optional
        Binary mask indicating where the low-pass filtered image should be
        interpolated with astropy.convolution. This option can be useful if the
        low-pass filtered image is aimed to capture low-spatial frequency sky
        signal, while avoiding a stellar halo (set to one in the binary mask).
        Note: only works with Gaussian kernel or PSF convolution.
    iterate: bool, opt
        If the first convolution leaves nans, whether to continue replacing
        nans by interpolation until they are all replaced.
    **kwargs : dict
        Passed through to the astropy.convolution.convolve or convolve_fft
        function.

    Returns
    -------
    filtered : numpy ndarray
        Low-pass filtered cube.
    """
    filtered_cube = np.empty_like(array)

    if array.ndim == 3:
        # temporal cube: filter frame by frame
        for fr in Progressbar(range(array.shape[0]), verbose=verbose):
            filtered_cube[fr] = frame_filter_lowpass(
                array[fr], mode, median_size, fwhm_size, conv_mode, kernel_sz,
                psf, mask, iterate, **kwargs)
    elif array.ndim == 4:
        # spectral+temporal cube: progress bar over frames, inner loop over
        # wavelength channels
        for fr in Progressbar(range(array.shape[1]), verbose=verbose):
            for ch in range(array.shape[0]):
                filtered_cube[ch][fr] = frame_filter_lowpass(
                    array[ch][fr], mode, median_size, fwhm_size, conv_mode,
                    kernel_sz, psf, mask, iterate, **kwargs)
    else:
        raise TypeError('Input array is not a 3d or 4d cube')

    return filtered_cube
def frame_deconvolution(array, psf, n_it=30):
    """
    Iterative image deconvolution following the scikit-image implementation
    of the Richardson-Lucy algorithm.

    Considering an image that has been convolved by the point spread function
    of an instrument, the algorithm will sharpen the blurred
    image through a user-defined number of iterations, which changes the
    regularisation.

    Reference: William Hadley Richardson, "Bayesian-Based Iterative Method of
    Image Restoration", J. Opt. Soc. Am. 62, 55-59 (1972),
    DOI:10.1364/JOSA.62.000055

    See also description at:
    https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution

    Parameters
    ----------
    array : numpy ndarray
        Input image, 2d frame.
    psf : numpy ndarray
        Input psf, 2d frame.
    n_it : int, optional
        Number of iterations.

    Returns
    -------
    deconv : numpy ndarray
        Deconvolved image.
    """
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array.')
    if psf.ndim != 2:
        raise TypeError('Input psf is not a frame or 2d array.')

    # normalise to [0, 1] as expected by richardson_lucy
    max_I = np.amax(array)
    min_I = np.amin(array)
    drange = max_I - min_I
    if drange == 0:
        # constant image: normalisation would divide by zero and
        # deconvolution is a no-op; return an unchanged copy
        return array.copy()

    deconv = richardson_lucy((array - min_I) / drange, psf, iterations=n_it)
    # rescale back to the original dynamic range
    deconv *= drange
    deconv += min_I

    return deconv
|
vortex-exoplanet/VIP
|
vip_hci/var/filters.py
|
Python
|
mit
| 23,900
|
[
"Gaussian"
] |
91e3531857d9b9972e03c82bc9f3fa7cd8e76ef829eed474908e3720ed3bd5fd
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build phase 3 cmap requirements data.
This starts with default assignments based on unicode property
script and script_extensions data, then applies a sequences of
operations to generate an allocation of cmaps to 'scripts' i.e.
font families. The operations include assigning/removing common
characters in blocks, or entire blocks, to/from scripts,
assigning additional punctuation (based on reading the Unicode
8 standard and various L2 docs), and so on.
This uses pseudo script codes to represent the font families,
but this needs to be changed to some better representation.
for now, these are:
CJK: for all CJK scripts
EXCL: for excluded blocks (PUA, surrogates)
MONO: for blocks going into a monospace font
MUSIC: for blocks going into a music font
SYM2: for blocks going into a 'symbols 2' font with fewer masters
Zmth: for blocks going into a 'math' font
ZSym: for blocks going into the main symbols font (6 masters)
ZSye: for blocks going into the color emoji font
"""
import argparse
import collections
import sys
from nototools import cldr_data
from nototools import cmap_data
from nototools import compare_cmap_data
from nototools import collect_cldr_punct
from nototools import noto_data
from nototools import opentype_data
from nototools import tool_utils
from nototools import unicode_data
# Pseudo font-family targets that merge several Unicode script codes:
# 'CJK' collects all the CJK scripts, 'LGC' collects Latin/Greek/Cyrillic.
_MERGED_SCRIPTS_BY_TARGET = {
    'CJK': 'Bopo Hang Hani Hans Hant Hira Jpan Kana Kore'.split(),
    'LGC': 'Latn Grek Cyrl'.split(),
}
def _invert_script_to_chars(script_to_chars):
"""Convert script_to_chars to char_to_scripts and return."""
char_to_scripts = collections.defaultdict(set)
for script, cps in script_to_chars.iteritems():
for cp in cps:
char_to_scripts[cp].add(script)
return char_to_scripts
class CmapOps(object):
  """Mutable mapping from (pseudo) script codes to sets of codepoints.

  All cmap mutations go through this class so that every add/remove can be
  logged, either per-codepoint ('details') or summarized per Unicode block.
  Verification helpers raise ValueError on misuse (unknown script, etc.).
  """

  def __init__(self, script_to_chars=None, log_events=False, log_details=False,
               undefined_exceptions = None):
    # script_to_chars: optional initial mapping; the value sets are copied so
    #   the caller's data is never mutated.
    # log_events: when true, operations are reported to stdout.
    # log_details: when true, log each codepoint; otherwise summarize by block.
    # undefined_exceptions: codepoints allowed to be added even though they
    #   are not defined characters in the Unicode data.
    if script_to_chars == None:
      self._script_to_chars = {}
    else:
      self._script_to_chars = {
          script: set(script_to_chars[script])
          for script in script_to_chars
      }
    self._log_events = log_events
    self._log_details = log_details
    # Blocks whose per-codepoint detail logging is suppressed (too noisy).
    self._suppressed_blocks = {
        'Hangul Jamo',
        'Kangxi Radicals',
        'Kanbun',
        'CJK Symbols and Punctuation',
        'Hangul Compatibility Jamo',
        'CJK Strokes',
        'Enclosed CJK Letters and Months',
        'CJK Compatibility',
        'CJK Compatibility Ideographs',
        'CJK Compatibility Ideographs Supplement',
        'CJK Unified Ideographs Extension A',
        'CJK Unified Ideographs Extension B',
        'CJK Unified Ideographs Extension C',
        'CJK Unified Ideographs Extension D',
        'CJK Unified Ideographs Extension E',
        'CJK Unified Ideographs',
        'CJK Radicals Supplement',
        'Hangul Jamo Extended-A',
        'Hangul Jamo Extended-B',
        'Hangul Syllables',
    }
    # Scripts whose per-codepoint detail logging is suppressed.
    self._suppressed_scripts = {
        'EXCL',
    }
    # Name of the block currently being logged (for block summaries).
    self._block = None
    self._undefined_exceptions = undefined_exceptions or set()

  def _report(self, text):
    # Emit a log line if event logging is enabled.
    if self._log_events:
      print text

  def _finish_block(self):
    # Flush the per-operation codepoint summary of the block just finished
    # (only used in summary mode, i.e. when details are off).
    if self._block and self._log_events and not self._log_details:
      for text in sorted(self._block_count):
        print '%s: %s' % (
            text, tool_utils.write_int_ranges(
                self._block_count[text]))

  def _report_cp(self, cp, text, script):
    # Log one codepoint operation: either print it immediately (detail mode,
    # unless the block/script is suppressed) or accumulate into the current
    # block summary.
    if not self._log_events:
      return
    cp_block = unicode_data.block(cp)
    if cp_block != self._block:
      self._finish_block()
      self._block = cp_block
      print '# block: ' + self._block
      self._block_count = collections.defaultdict(set)
    if self._log_details:
      if not (
          self._block in self._suppressed_blocks or
          script in self._suppressed_scripts):
        print self._cp_info(cp), text
    else:
      self._block_count[text].add(cp)

  def _error(self, text):
    # Report to stderr and abort; all verification failures funnel here.
    print >> sys.stderr, text
    raise ValueError('failed')

  def _verify_script_exists(self, script):
    if script not in self._script_to_chars:
      self._error('script %s does not exist' % script)

  def _verify_script_does_not_exist(self, script):
    if script in self._script_to_chars:
      self._error('script %s already exists' % script)

  def _verify_scripts_exist(self, scripts):
    for script in scripts:
      self._verify_script_exists(script)
    return sorted(scripts)

  def _verify_script_empty(self, script):
    if len(self._script_to_chars[script]):
      self._error('script %s is not empty, cannot delete' % script)

  def _cp_info(self, cp):
    # '0041 (LATIN CAPITAL LETTER A)'-style description of a codepoint.
    return '%04X (%s)' % (cp, unicode_data.name(cp, '<unnamed>'))

  def _script_ok_add(self, cp, script):
    # Add only defined characters (or explicitly excepted undefined ones).
    if unicode_data.is_defined(cp) or cp in self._undefined_exceptions:
      self._script_cp_ok_add(cp, script)

  def _script_cp_ok_add(self, cp, script):
    # Unconditional add; logs only if cp was not already present.
    if cp not in self._script_to_chars[script]:
      self._script_to_chars[script].add(cp)
      self._report_cp(cp, 'added to ' + script, script)

  def _script_ok_remove(self, cp, script):
    # Remove only defined characters.
    if unicode_data.is_defined(cp):
      self._script_cp_ok_remove(cp, script)

  def _script_cp_ok_remove(self, cp, script):
    # Unconditional remove; logs only if cp was actually present.
    if cp in self._script_to_chars[script]:
      self._report_cp(cp, 'removed from ' + script, script)
      self._script_to_chars[script].remove(cp)

  def _finish_phase(self):
    self._finish_block()
    self._block = None

  def phase(self, phase_name):
    """Mark the start of a named phase of operations (for logging)."""
    self._finish_phase()
    self._report('\n# phase: ' + phase_name)

  def log(self, log_msg):
    """Write an arbitrary message to the log."""
    self._report('# log: ' + log_msg)

  def ensure_script(self, script):
    """Create script if it does not already exist."""
    if script in self._script_to_chars:
      return
    self.create_script(script)

  def create_script(self, script):
    """Create a new empty script; error if it already exists."""
    self._verify_script_does_not_exist(script)
    self._script_to_chars[script] = set()
    self._report('# create script: ' + script)

  def delete_script(self, script):
    """Delete a script; error if it does not exist or is not empty."""
    self._verify_script_exists(script)
    self._verify_script_empty(script)
    del self._script_to_chars[script]
    self._report('# delete script: ' + script)

  def add(self, cp, script):
    """Add one codepoint to script (skipped if cp is undefined)."""
    self._verify_script_exists(script)
    self._script_ok_add(cp, script)

  def add_all(self, cps, script):
    """Add all codepoints in cps to script, in codepoint order."""
    self._verify_script_exists(script)
    for cp in sorted(cps):
      self._script_ok_add(cp, script)

  def add_all_to_all(self, cps, scripts):
    """Add all codepoints in cps to each script in scripts."""
    scripts = self._verify_scripts_exist(scripts)
    for cp in sorted(cps):
      if unicode_data.is_defined(cp):
        for script in scripts:
          self._script_cp_ok_add(cp, script)

  def remove(self, cp, script):
    """Remove one codepoint from script (skipped if cp is undefined)."""
    self._verify_script_exists(script)
    self._script_ok_remove(cp, script)

  def remove_all(self, cps, script):
    """Remove all codepoints in cps from script, in codepoint order."""
    self._verify_script_exists(script)
    for cp in sorted(cps):
      self._script_ok_remove(cp, script)

  def remove_all_from_all(self, cps, scripts):
    """Remove all codepoints in cps from each script in scripts."""
    scripts = self._verify_scripts_exist(scripts)
    for cp in sorted(cps):
      if unicode_data.is_defined(cp):
        for script in scripts:
          self._script_cp_ok_remove(cp, script)

  def remove_script_from(self, src_script, from_script):
    """Remove every char currently in src_script from from_script."""
    self._verify_script_exists(from_script)
    cps = self.script_chars(src_script)
    for cp in cps:
      self._script_ok_remove(cp, from_script)

  def move_to_from(self, cp, to_script, from_script):
    """Move one codepoint from from_script to to_script."""
    self._verify_script_exists(from_script)
    self._verify_script_exists(to_script)
    self._script_ok_add(cp, to_script)
    self._script_ok_remove(cp, from_script)

  def move_all_to_from(self, cps, to_script, from_script):
    """Combines add and remove."""
    self._verify_script_exists(from_script)
    self._verify_script_exists(to_script)
    # all adds happen before any removes
    sorted_cps = sorted(cps)
    for cp in sorted_cps:
      self._script_ok_add(cp, to_script)
    for cp in sorted_cps:
      self._script_ok_remove(cp, from_script)

  def all_scripts(self):
    """Return the script codes currently in the mapping."""
    return self._script_to_chars.keys()

  def create_char_to_scripts(self):
    """Return a new map from codepoint to the set of scripts holding it."""
    return _invert_script_to_chars(self._script_to_chars)

  def script_chars(self, script):
    """Return the sorted list of codepoints assigned to script."""
    self._verify_script_exists(script)
    return sorted(self._script_to_chars[script])

  def create_script_to_chars(self):
    """Return an independent copy of the script-to-charset mapping."""
    return {
        script: set(self._script_to_chars[script])
        for script in self._script_to_chars
    }

  def finish(self):
    """Flush any pending block-summary logging."""
    self._finish_phase()
def _build_block_to_primary_script():
  """Create a map from block to the primary script in a block.

  If there are no characters defined in the block, it gets the script 'EXCL',
  for 'exclude.' We don't define characters in this block.

  If the most common script accounts for less than 80% of the defined
  characters in the block, we use the primary from assigned_primaries, which
  might be None.

  It's an error if there's no default primary and it's not listed in
  assigned_primaries."""

  # Primaries for blocks where no single script dominates.
  assigned_primaries = {
      'Basic Latin': 'Latn',
      'Latin-1 Supplement': 'Latn',
      'Vedic Extensions': 'Deva',
      'Superscripts and Subscripts': 'Latn',
      'Number Forms': 'Zyyy',
      'CJK Symbols and Punctuation': 'CJK',
      'Enclosed CJK Letters and Months': 'CJK',
      'CJK Compatibility': 'CJK',
      'Alphabetic Presentation Forms': None,
      'Halfwidth and Fullwidth Forms': 'CJK',
      'Kana Supplement': 'CJK',
  }

  # Primaries for blocks dominated by the 'Zinh' (inherited) script.
  inherited_primaries = {
      'Combining Diacritical Marks': 'Latn',
      'Combining Diacritical Marks Extended': 'Latn',
      'Combining Diacritical Marks Supplement': 'Latn',
      'Combining Diacritical Marks for Symbols': 'Zyyy',
      'Variation Selectors': 'EXCL',
      'Combining Half Marks': 'Latn',
      'Variation Selectors Supplement': 'EXCL',
  }

  block_to_script = {}
  for block in unicode_data.block_names():
    start, finish = unicode_data.block_range(block)
    script_counts = collections.defaultdict(int)
    num = 0
    for cp in range(start, finish + 1):
      script = unicode_data.script(cp)
      if script != 'Zzzz':
        script_counts[script] += 1
        num += 1
    max_script = None
    max_script_count = 0
    for script, count in script_counts.iteritems():
      if count > max_script_count:
        max_script = script
        max_script_count = count
    # Script distribution of the block, for error messages.
    # BUG FIX: this was previously computed only in the '< 0.8' branch, so
    # the 'no inherited primary' error below raised a NameError instead of
    # printing the diagnostic.
    info = sorted(script_counts.iteritems(), key=lambda t: (-t[1], t[0]))
    block_info = '%s %s' % (block, ', '.join('%s/%d' % t for t in info))
    if num == 0:
      max_script = 'EXCL'  # exclude
    elif float(max_script_count) / num < 0.8:
      if block in assigned_primaries:
        max_script = assigned_primaries[block]
        # print 'assigning primary', block_info, '->', max_script
      else:
        print >> sys.stderr, 'ERROR: no primary', block, block_info
        max_script = None
    elif max_script == 'Zinh':
      if block in inherited_primaries:
        max_script = inherited_primaries[block]
      else:
        print >> sys.stderr, 'ERROR: no inherited primary', block, block_info
        max_script = None
    block_to_script[block] = max_script
  return block_to_script
# Lazily-built cache: block name -> primary script (or None).
_block_to_primary_script = None


def _primary_script_for_block(block):
  """Return the primary script for the block, or None if no primary script."""
  global _block_to_primary_script
  cache = _block_to_primary_script
  if not cache:
    # build the table on first use and keep it for subsequent calls
    cache = _build_block_to_primary_script()
    _block_to_primary_script = cache
  return cache[block]
def _unassign_inherited_and_common_with_extensions(cmap_ops):
  """Inherited and common characters with an extension that is neither of
  these get removed from inherited/common scripts."""

  def _strip_extended(script):
    # drop any cp whose script extensions name a concrete script; that
    # concrete script will own the cp instead of Zinh/Zyyy
    for cp in cmap_ops.script_chars(script):
      extensions = unicode_data.script_extensions(cp)
      if any(s not in ('Zinh', 'Zyyy') for s in extensions):
        cmap_ops.remove(cp, script)

  cmap_ops.phase('unassign inherited with extensions')
  _strip_extended('Zinh')

  cmap_ops.phase('unassign common with extensions')
  _strip_extended('Zyyy')
def _reassign_inherited(cmap_ops):
  """Assign all 'Zinh' chars to the primary script in their block.

  Fail if there's no primary script. 'Zinh' is removed from script_to_chars.
  """
  cmap_ops.phase('reassign inherited')
  for cp in cmap_ops.script_chars('Zinh'):
    primary_script = _primary_script_for_block(unicode_data.block(cp))
    if not primary_script:
      # errors are reported but the cp stays in Zinh, which makes the final
      # delete_script below fail (scripts must be empty to be deleted)
      print >> sys.stderr, 'Error: no primary script for %04X' % cp
    elif primary_script == 'Zinh':
      print >> sys.stderr, 'Error: primary script for %04X is Zinh' % cp
    else:
      cmap_ops.ensure_script(primary_script)
      cmap_ops.add(cp, primary_script)
      cmap_ops.remove(cp, 'Zinh')
  cmap_ops.delete_script('Zinh')
def _reassign_common(cmap_ops):
  """Move 'Zyyy' chars in blocks where 'Zyyy' is not primary to the primary
  script."""
  cmap_ops.phase('reassign common')
  for cp in cmap_ops.script_chars('Zyyy'):
    primary_script = _primary_script_for_block(unicode_data.block(cp))
    # skip blocks with no primary, and blocks whose primary is common itself
    # (idiom fix: identity comparison with None instead of '!= None')
    if primary_script is not None and primary_script != 'Zyyy':
      cmap_ops.ensure_script(primary_script)
      cmap_ops.add(cp, primary_script)
      cmap_ops.remove(cp, 'Zyyy')
def _unassign_latin(cmap_ops):
  """Remove some characters that extensions assigns to Latin but which we don't
  need there."""
  cps_to_drop = tool_utils.parse_int_ranges("""
      0951 0952 # devanagari marks
      10FB # Georgian paragraph separator
      """)
  cmap_ops.phase('unassign latin')
  cmap_ops.remove_all(cps_to_drop, 'Latn')
def _assign_cldr_punct(cmap_ops):
  """Assigns cldr punctuation to scripts."""
  script_punct = collect_cldr_punct.script_to_punct()
  for script, punct in script_punct.iteritems():
    if script == 'CURRENCY':
      # currency symbols are not assigned here
      continue
    cmap_ops.phase('assign cldr punct for ' + script)
    cmap_ops.ensure_script(script)
    for ch in punct:
      cmap_ops.add(ord(ch), script)
def _reassign_scripts(cmap_ops, scripts, new_script):
  """Reassign all chars in scripts to new_script."""
  # moving a script into itself would delete its chars outright
  assert new_script not in scripts

  cmap_ops.phase('reassign scripts')
  cmap_ops.ensure_script(new_script)
  for old_script in sorted(scripts):
    cmap_ops.phase('reassign %s to %s' % (old_script, new_script))
    for cp in cmap_ops.script_chars(old_script):
      cmap_ops.remove(cp, old_script)
      cmap_ops.add(cp, new_script)
    # source script is now empty and can be dropped
    cmap_ops.delete_script(old_script)
def _reassign_merged_scripts(cmap_ops):
  """Reassign merged scripts."""
  # iterate targets in sorted order for deterministic logging
  for target in sorted(_MERGED_SCRIPTS_BY_TARGET):
    cmap_ops.phase('reassign to ' + target)
    _reassign_scripts(cmap_ops, _MERGED_SCRIPTS_BY_TARGET[target], target)
def _reassign_common_by_block(cmap_ops):
  """Reassign common chars to new scripts based on block."""
  # Map from Unicode block name to the (pseudo) script that should own the
  # remaining 'Zyyy' (common) characters of that block.
  block_assignments = {
      'Spacing Modifier Letters': 'LGC',
      'General Punctuation': 'LGC',
      'Currency Symbols': 'LGC',
      'Combining Diacritical Marks for Symbols': 'Zsym',
      'Letterlike Symbols': 'LGC',
      'Number Forms': 'Zsym',
      'Arrows': 'Zmth',
      'Mathematical Operators': 'Zmth',
      'Miscellaneous Technical': 'Zsym',
      'Control Pictures': 'SYM2',
      'Optical Character Recognition': 'SYM2',
      'Enclosed Alphanumerics': 'Zsym',
      'Box Drawing': 'MONO',
      'Block Elements': 'MONO',
      'Geometric Shapes': 'SYM2', # change
      'Miscellaneous Symbols': 'Zsym',
      'Dingbats': 'SYM2',
      'Miscellaneous Mathematical Symbols-A': 'Zmth',
      'Supplemental Arrows-A': 'Zmth',
      'Supplemental Arrows-B': 'Zmth',
      'Miscellaneous Mathematical Symbols-B': 'Zmth',
      'Supplemental Mathematical Operators': 'Zmth',
      'Miscellaneous Symbols and Arrows': 'SYM2',
      'Supplemental Punctuation': 'LGC',
      'Ideographic Description Characters': 'CJK',
      'Yijing Hexagram Symbols': 'SYM2',
      'Modifier Tone Letters': 'LGC',
      'Vertical Forms': 'CJK',
      'CJK Compatibility Forms': 'CJK',
      'Small Form Variants': 'CJK',
      'Specials': 'SYM2',
      'Ancient Symbols': 'SYM2',
      'Phaistos Disc': 'SYM2',
      'Byzantine Musical Symbols': 'MUSIC',
      'Musical Symbols': 'MUSIC',
      'Tai Xuan Jing Symbols': 'SYM2',
      'Mathematical Alphanumeric Symbols': 'Zmth',
      'Mahjong Tiles': 'SYM2',
      'Domino Tiles': 'SYM2',
      'Playing Cards': 'SYM2',
      'Enclosed Alphanumeric Supplement': 'Zsym',
      'Enclosed Ideographic Supplement': 'CJK',
      'Miscellaneous Symbols and Pictographs': 'SYM2',
      'Emoticons': 'SYM2',
      'Ornamental Dingbats': 'SYM2',
      'Transport and Map Symbols': 'SYM2',
      'Alchemical Symbols': 'Zsym',
      'Geometric Shapes Extended': 'SYM2',
      'Supplemental Arrows-C': 'SYM2',
      'Supplemental Symbols and Pictographs': 'SYM2',
      'Tags': 'EXCL',
  }

  cmap_ops.phase('reassign common by block')
  used_assignments = set()
  last_block = None
  # script_chars returns cps sorted, so all cps of a block are contiguous;
  # the block lookup and script creation happen once per block change
  for cp in cmap_ops.script_chars('Zyyy'):
    block = unicode_data.block(cp)
    if block != last_block:
      last_block = block
      if block not in block_assignments:
        print >> sys.stderr, 'ERROR: no assignment for block %s' % block
        new_script = None
      else:
        new_script = block_assignments[block]
        cmap_ops.ensure_script(new_script)
        used_assignments.add(block)
    if new_script:
      cmap_ops.remove(cp, 'Zyyy')
      cmap_ops.add(cp, new_script)
    else:
      print >> sys.stderr, '  could not assign %04x %s' % (
          cp, unicode_data.name(cp))

  if len(used_assignments) != len(block_assignments):
    # Cross-check: report assignments that never fired, distinguishing
    # valid-but-unused block names from unknown (misspelled) ones.
    print >> sys.stderr, 'ERROR: some block assignments unused'
    unused = set([block for block in block_assignments
                  if block not in used_assignments])
    for block in unicode_data.block_names():
      if block in unused:
        print >> sys.stderr, '  %s' % block
        unused.remove(block)
    if unused:
      print >> sys.stderr, 'ERROR: unknown block names'
      for block in sorted(unused):
        print >> sys.stderr, '  %s' % block

  cmap_ops.delete_script('Zyyy')
def _block_cps(block):
  """Return the frozenset of defined codepoints in the named block."""
  start, end = unicode_data.block_range(block)
  defined = (cp for cp in range(start, end + 1)
             if unicode_data.is_defined(cp))
  return frozenset(defined)
def _reassign_by_block(cmap_ops):
  """Reassign all chars in select blocks to designated scripts."""
  # block, from, to. from '*' means from all scripts.
  block_assignments = [
      ('Number Forms', 'LGC', 'Zsym'),
      ('Halfwidth and Fullwidth Forms', 'LGC', 'CJK'),
      ('Aegean Numbers', '*', 'Linb'),
      ('Ancient Greek Numbers', '*', 'SYM2'),
      ('Ancient Symbols', 'LGC', 'SYM2'),
      ('Braille Patterns', 'Brai', 'SYM2'),
      ('Coptic Epact Numbers', '*', 'SYM2'),
      ('Rumi Numeral Symbols', '*', 'SYM2'),
      ('Ancient Greek Musical Notation', '*', 'MUSIC'),
      ('Counting Rod Numerals', 'CJK', 'SYM2'),
      ('Arabic Mathematical Alphabetic Symbols', '*', 'Zmth'),
      ('High Surrogates', '*', 'EXCL'),
      ('High Private Use Surrogates', '*', 'EXCL'),
      ('Low Surrogates', '*', 'EXCL'),
      ('Private Use Area', '*', 'EXCL'),
      ('Variation Selectors', '*', 'EXCL'),
      ('Tags', '*', 'EXCL'),
      ('Variation Selectors Supplement', '*', 'EXCL'),
      ('Supplementary Private Use Area-A', '*', 'EXCL'),
      ('Supplementary Private Use Area-B', '*', 'EXCL'),
  ]
  # process blocks in codepoint order
  block_assignments = sorted(
      block_assignments, key=lambda k: unicode_data.block_range(k[0])[0])

  cmap_ops.phase('reassign by block')
  # snapshot of cp -> scripts taken once, before any reassignment below
  char_to_scripts = cmap_ops.create_char_to_scripts()
  for block, from_scripts, to_script in block_assignments:
    start, finish = unicode_data.block_range(block)
    if from_scripts == '*':
      all_scripts = True
    else:
      all_scripts = False
      from_scripts = from_scripts.split()
    for cp in range(start, finish + 1):
      if not unicode_data.is_defined(cp):
        continue
      if cp not in char_to_scripts and to_script != 'EXCL':
        # defined cp that no script claimed; EXCL targets are expected to
        # pick up unclaimed cps, anything else is worth reporting
        print >> sys.stderr, 'reassign missing %04X %s' % (
            cp, unicode_data.name(cp, '<unnamed>'))
        continue
      if all_scripts:
        from_list = char_to_scripts[cp]
      else:
        from_list = from_scripts
      for from_script in from_list:
        if from_script == to_script:
          continue
        if not all_scripts and (from_script not in from_scripts):
          continue
        cmap_ops.remove(cp, from_script)
      cmap_ops.add(cp, to_script)
def _remove_empty(cmap_ops):
  """Remove any empty scripts (Braille should be one)."""
  cmap_ops.phase('remove empty')
  # iterate over a snapshot so deletion does not disturb iteration
  snapshot = cmap_ops.create_script_to_chars()
  for script, chars in snapshot.iteritems():
    if not chars:
      cmap_ops.delete_script(script)
def _reassign_symbols(cmap_ops):
  """Some symbols belong together but get split up when we assign by block."""
  cmap_ops.phase('reassign symbols')

  # white-arrow stragglers living outside the main arrows blocks
  white_arrow_parts = tool_utils.parse_int_ranges(
      '2b00-2b04 1f8ac-1f8ad')
  cmap_ops.move_all_to_from(white_arrow_parts, 'Zsym', 'SYM2')

  # media-control / TV symbols
  tv_symbols = tool_utils.parse_int_ranges('23fb-23fe 2b58')
  cmap_ops.move_all_to_from(tv_symbols, 'SYM2', 'Zsym')

  # we want a copy in SYM2 for sizes, assume MATH will do its own thing
  # in context.
  math_circles = tool_utils.parse_int_ranges('2219 2299 22c5')
  cmap_ops.add_all(math_circles, 'SYM2')

  # keyboard symbols, user interface symbols, media play symbols
  misc_tech = tool_utils.parse_int_ranges(
      '2318 231a-231b 2324-2328 232b 237d 23ce-23cf 23e9-23fa 23fb-23fe')
  cmap_ops.move_all_to_from(misc_tech, 'SYM2', 'Zsym')

  # Split Miscellaneous Symbols into SYM2 and Zsym by related symbols.
  # mostly this is based on whether the group of symbols seems to have a use
  # in running text or is based on some alphabetic character.
  to_sym2 = tool_utils.parse_int_ranges(
      """2600-2609 # weather
      260e-2612 # ballot box
      2614 # umbrella with rain
      2615 # hot beverage
      2616-2617 # shogi pieces
      261a-261f # pointing hands
      2620-2623 # caution signs
      2626-262f 2638 # religious/political
      2630-2637 # chinese trigrams
      2668 # hot springs
      267f # wheelchair symbol
      2686-2689 # go markers
      268a-268f # yijing monograms/diagrms
      269e-269f # closed captioning
      26a1 # high voltage
      26aa-26ac # circles
      26bd-26be # sports
      26bf # squared key
      26c0-26c3 # checkers/draughts
      26c4-26c8 # weather
      26c9-26ca # more shogi
      26cb # game symbol
      """)
  to_zsym = tool_utils.parse_int_ranges(
      """260a-260d # alchemical symbols
      2613 # saltire
      2618-2619 # shamrock, floral bullet
      2624-2625 # medical, ankh
      2639-263b # smiley faces
      263c-2647 # astrological
      2648-2653 # western zodiac
      2654-265f # western chess
      2660-2667 # card suits
      2669-266f # music symbols
      2670-2671 # syriac cross
      2672-267d # recycling
      267e # paper
      2680-2685 # die faces
      2690-269b # dictionary and map symbols, go with Zsym since dictionary use
      269c # fleur-de-lis
      269d # outlined white star, a symbol of morocco
      26a0 # warning sign (exclamation point inside rounded triangle)
      26a2-26a9 # gender
      26ad-26b1 # genealogical
      26b2 # gender
      26b3-26bc # astrological
      26cc-26cd # traffic signs
      26ce # zodiac
      26cf-26e1 # traffic signs again
      26e2 # astronomical
      26e3 # map symbol
      26e4-26e7 # pentagrams
      26e8-26ff # more map symbols
      """)

  # sanity check: the two lists must be disjoint and together cover the
  # whole Miscellaneous Symbols block (2600-26FF)
  duplicate_cps = to_sym2 & to_zsym
  if duplicate_cps:
    raise Exception(
        '%d cps in both from and to symbols: %s' % (
            len(duplicate_cps), tool_utils.write_int_ranges(duplicate_cps)))
  missing_cps = set(range(0x2600, 0x2700))
  missing_cps -= to_zsym
  missing_cps -= to_sym2
  if missing_cps:
    raise Exception(
        '%d cps from Misc. Symbols in neither from nor to symbols: %s' % (
            len(missing_cps), tool_utils.write_int_ranges(missing_cps)))

  cmap_ops.move_all_to_from(to_sym2, 'SYM2', 'Zsym')
  cmap_ops.move_all_to_from(to_zsym, 'Zsym', 'SYM2')

  # neutral face should go with smiley faces, which are in Zsym
  cmap_ops.move_to_from(0x1f610, 'Zsym', 'SYM2')

  # more math symbols that are geometric and might want dual treatment
  more_math = tool_utils.parse_int_ranges('2981 29bf 29eb')
  cmap_ops.add_all(more_math, 'SYM2')

  # let's put white arrows into Sym2
  white_arrows = tool_utils.parse_int_ranges(
      """21e6 21e8 21e7 21e9 21f3 2b04 2b00-2b03 1f8ac 1f8ad 21ea-21f0
      """)
  cmap_ops.move_all_to_from(white_arrows, 'SYM2', 'Zsym')

  # circled digits should all go into Symbols
  circled_digits = tool_utils.parse_int_ranges(
      """24ea # circled digit 0
      2460-2473 # circled digit 1-9, number 10-20
      24ff # negative circled digit 0
      1f10c # dingbat negative circled sans-serif digit 0
      2776-277f # dingbat negative circled digits 1-9, number 10
      2780-2789 # dingbat circled sans-serif digits 1-9, number 10
      278a-2793 # dingbat negative circled sans-serif digits 1-9, number 10
      24eb-24f4 # negative circled number 11-20
      1f10b # dingbat circled sans-serif digit 0
      """)
  cmap_ops.move_all_to_from(circled_digits, 'Zsym', 'SYM2')

  # hourglass with flowing sand is in a block that got assigned to Zsym by
  # default. Looking at it and its neighbors, it seems really odd that these
  # are with 'technical symbols'
  emoji_symbols = tool_utils.parse_int_ranges('23f0-23f3')
  cmap_ops.add_all(emoji_symbols, 'SYM2')
  cmap_ops.remove_all(emoji_symbols, 'Zsym')

  # neutral face should go with white smiling/frowning face, which are in Zsym
  # NOTE(review): 0x1f610 was already moved to Zsym above, so this add/remove
  # pair is redundant (though harmless, as both operations are idempotent)
  cmap_ops.add(0x1f610, 'Zsym')
  cmap_ops.remove(0x1f610, 'SYM2')
def _reassign_emoji(cmap_ops):
  """Reassign all emoji to emoji-color. Then assign all emoji with default
  text presentation, plus those with variation selectors, plus select
  others, to SYM2."""

  cmap_ops.phase('reassign emoji')

  # emoji that only have a color presentation, which the symbol fonts
  # should therefore not carry
  color_only_emoji = set(unicode_data.get_presentation_default_emoji())
  for cp in (0x1f004,   # mahjong tile red dragon
             0x1f0cf):  # playing card black joker
    color_only_emoji.remove(cp)
  # remove emoji with a variation selector that allows a text presentation
  # include proposed variants from 2016/08/23
  color_only_emoji -= unicode_data.get_unicode_emoji_variants(
      'proposed_extra')

  all_emoji = unicode_data.get_emoji()
  cmap_ops.create_script('Zsye')
  cmap_ops.add_all(all_emoji, 'Zsye')

  cmap_ops.remove_all_from_all(color_only_emoji, ['Zsym', 'SYM2'])
def _assign_nastaliq(cmap_ops):
  """Create Aran script based on requirements doc."""

  # Range spec matches "Noto Nastaliq requirements" doc, Tier 1.
  # ('#'-prefixed text inside these range strings is comment data that
  # tool_utils.parse_int_ranges accepts, as used throughout this file.)
  urdu_chars = tool_utils.parse_int_ranges("""
      0600-0604 060b-0614 061b 061c 061e-061f 0620 0621-063a
      0640-0659 065e-066d 0670-0673 0679 067a-067b 067c 067d
      067e 067f-0680 0681 0683-0684 0685-0686 0687 0688-0689
      068a 068b 068c-068d 068e 068f 0691 0693 0696 0698 0699
      069a 069e 06a6 06a9 06ab 06af-06b0 06b1 06b3 06b7 06ba
      06bb 06bc 06be 06c0-06c4 06cc-06cd 06d0 06d2-06d5
      06dd-06de 06e9 06ee-06ef 06f0-06f9 06ff 0759 075c 0763
      0767-0769 076b-077d 08ff fbb2-fbc1 fd3e-fd3f fdf2
      fdfa-fdfd""")
  cmap_ops.phase('assign nastaliq')
  cmap_ops.create_script('Aran')
  cmap_ops.add_all(urdu_chars, 'Aran')

  # These additional arabic were in phase 2 scripts.
  additional_arabic = tool_utils.parse_int_ranges("""
      0609 # ARABIC-INDIC PER MILLE SIGN
      060a # ARABIC-INDIC PER TEN THOUSAND SIGN
      063b # ARABIC LETTER KEHEH WITH TWO DOTS ABOVE
      063c # ARABIC LETTER KEHEH WITH THREE DOTS BELOW
      063d # ARABIC LETTER FARSI YEH WITH INVERTED V
      063e # ARABIC LETTER FARSI YEH WITH TWO DOTS ABOVE
      063f # ARABIC LETTER FARSI YEH WITH THREE DOTS ABOVE
      065d # ARABIC REVERSED DAMMA
      066e # ARABIC LETTER DOTLESS BEH
      066f # ARABIC LETTER DOTLESS QAF
      06a1 # ARABIC LETTER DOTLESS FEH
      06a4 # ARABIC LETTER VEH
      06e0 # ARABIC SMALL HIGH UPRIGHT RECTANGULAR ZERO
      06e1 # ARABIC SMALL HIGH DOTLESS HEAD OF KHAH
      076a # ARABIC LETTER LAM WITH BAR
      """)
  cmap_ops.add_all(additional_arabic, 'Aran')

  # noto-fonts#597 requests exclamation point
  # noto-fonts#449 requests european digits
  european_digits = tool_utils.parse_int_ranges('0021 0030-0039')
  cmap_ops.add_all(european_digits, 'Aran')

  # noto-fonts#368 requests these characters
  # NOTE(review): all of these already fall inside the Tier-1 ranges above
  # (067a-067b, 0683-0684, 068a, 06b3, 0759, 0767-0769); presumably add_all
  # is idempotent so the redundancy is harmless — confirm.
  extra_arabic_1 = tool_utils.parse_int_ranges('067b 0684 068a 06b3 0759 0768')
  cmap_ops.add_all(extra_arabic_1, 'Aran')

  # noto-fonts#606 requests a few additional characters
  extra_arabic_2 = tool_utils.parse_int_ranges('06c6 06c7 06ca 06d5')
  cmap_ops.add_all(extra_arabic_2, 'Aran')
def _assign_complex_script_extra(cmap_ops):
  """Assigns Harfbuzz and USE characters to the corresponding scripts."""
  # Based on harfbuzz hb-ot-shape-complex-private
  # Removes Hang, Jungshik reports Behdad says it's not needed for Hang.
  hb_complex_scripts = """
      Arab Aran Bali Batk Beng Brah Bugi Buhd Cakm Cham Deva Dupl Egyp Gran
      Gujr Guru Hano Hebr Hmng Java Kali Khar Khmr Khoj Knda Kthi Lana
      Laoo Lepc Limb Mahj Mand Mani Mlym Modi Mong Mtei Mymr Nkoo Orya Phag
      Phlp Rjng Saur Shrd Sidd Sind Sinh Sund Sylo Syrc Tagb Takr Tale Talu
      Taml Tavt Telu Tfng Tglg Thai Tibt Tirh
      """.split()
  # Joiners and the dotted circle are needed by every complex shaper.
  hb_extra = tool_utils.parse_int_ranges("""
      200c # ZWNJ
      200d # ZWJ
      25cc # dotted circle""")

  # these scripts are based on github noto-fonts#576
  use_complex_scripts = """
      Bali Batk Brah Bugi Buhd Hano Kthi Khar Lepc Limb Mtei Rjng Saur Sund
      Sylo Tglg Tagb Tale Tavt
      """.split()
  # these characters are based on
  # https://www.microsoft.com/typography/OpenTypeDev/USE/intro.htm
  use_extra = tool_utils.parse_int_ranges("""
      200b # ZWS
      200c # ZWNJ
      200d # ZWJ
      25cc # dotted circle
      00a0 # NBS
      00d7 # multiplication sign
      2012 # figure dash
      2013 # en dash
      2014 # em dash
      2015 # horizontal bar
      2022 # bullet
      25fb # white medium square
      25fc # black medium square
      25fd # white medium small square
      25fe # black medium small square""")

  # Each extras set is added to every script in the matching list; the
  # USE scripts are a subset of the harfbuzz ones, so they receive both.
  cmap_ops.phase('assign hb complex')
  cmap_ops.add_all_to_all(hb_extra, hb_complex_scripts)

  cmap_ops.phase('assign use complex')
  cmap_ops.add_all_to_all(use_extra, use_complex_scripts)
def _assign_hyphens_for_autohyphenation(cmap_ops):
"""Assign hyphens per Roozbeh's request."""
hyphens = [
0x002d, # hyphen-minus
0x2010 # hyphen
]
# see github noto-fonts#524
# Cyrl, Grek, Latn rolled into LGC
# CJK not listed, these don't hyphenate, data is in CLDR for other reasons
hyphen_scripts = """
Arab Aran Armn Beng Copt Deva Ethi Geor Gujr Guru Hebr
Khmr Knda LGC Mlym Orya Taml Telu Thai Tibt
""".split()
cmap_ops.phase('assign hyphens')
cmap_ops.add_all_to_all(hyphens, hyphen_scripts)
def _generate_script_extra(script_to_chars):
  """Print additions to the _SCRIPT_REQUIRED table, in its source format.

  For each script in noto_data.P3_EXTRA_CHARACTERS_NEEDED, prints the
  defined codepoints that are not already in script_to_chars[script],
  grouped under Unicode block labels, as source text that can be pasted
  into _SCRIPT_REQUIRED.  Mutates the caller's sets: printed codepoints
  are added to script_to_chars[script].
  """
  for script in sorted(noto_data.P3_EXTRA_CHARACTERS_NEEDED):
    block = None
    cps = noto_data.P3_EXTRA_CHARACTERS_NEEDED[script]
    chars = script_to_chars[script]
    if script == 'Zsym':
      # Zsym is checked against the union of all the symbol-ish scripts,
      # so a codepoint already covered by any of them is not reported.
      chars.update(script_to_chars['Zmth'])
      chars.update(script_to_chars['SYM2'])
      chars.update(script_to_chars['MUSIC'])
      chars.update(script_to_chars['MONO'])
    for cp in sorted(cps):
      if not unicode_data.is_defined(cp):
        continue
      if cp not in chars:
        # Emit the entry header lazily, only when there is output for it.
        if block is None:
          print("'%s': tool_utils.parse_int_ranges(\"\"\"" % script)
        cp_block = unicode_data.block(cp)
        if cp_block != block:
          block = cp_block
          print(' # %s' % block)
        # was '<unnamed">' (stray quote); now matches _regen_script_required
        name = unicode_data.name(cp, '<unnamed>')
        print(' %04X # %s' % (cp, name))
        chars.add(cp)
    if block is not None:
      print(' """),')
# maintained using 'regen_script_required' fn
_SCRIPT_REQUIRED = [
# Adlm - Adlm (Adlam)
('Adlm',
# Comment
"""
Additional characters recommended by Monotype.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
# Arabic
061F # ARABIC QUESTION MARK
# General Punctuation
204F # REVERSED SEMICOLON
# Supplemental Punctuation
2E41 # REVERSED COMMA
"""),
# Aghb - Caucasian Albanian
('Aghb',
# Comment
"""
From core specification.
""",
# Data
"""
# Combining Diacritical Marks
0304 # COMBINING MACRON
0331 # COMBINING MACRON BELOW
# Combining Half Marks
FE20 # COMBINING LIGATURE LEFT HALF
FE21 # COMBINING LIGATURE RIGHT HALF
FE22 # COMBINING DOUBLE TILDE LEFT HALF
FE23 # COMBINING DOUBLE TILDE RIGHT HALF
FE24 # COMBINING MACRON LEFT HALF
FE25 # COMBINING MACRON RIGHT HALF
FE26 # COMBINING CONJOINING MACRON
FE27 # COMBINING LIGATURE LEFT HALF BELOW
FE28 # COMBINING LIGATURE RIGHT HALF BELOW
FE29 # COMBINING TILDE LEFT HALF BELOW
FE2A # COMBINING TILDE RIGHT HALF BELOW
FE2B # COMBINING MACRON LEFT HALF BELOW
FE2C # COMBINING MACRON RIGHT HALF BELOW
FE2D # COMBINING CONJOINING MACRON BELOW
FE2E # COMBINING CYRILLIC TITLO LEFT HALF
FE2F # COMBINING CYRILLIC TITLO RIGHT HALF
"""),
# Ahom - Ahom
# Arab - Arabic
('Arab',
# Comment
"""
According to Roozbeh (and existing fonts) the following punctuation and
digits are used with and interact with Arabic characters. Hyphen and
comma are to align with Aran.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
0030 # DIGIT ZERO
0031 # DIGIT ONE
0032 # DIGIT TWO
0033 # DIGIT THREE
0034 # DIGIT FOUR
0035 # DIGIT FIVE
0036 # DIGIT SIX
0037 # DIGIT SEVEN
0038 # DIGIT EIGHT
0039 # DIGIT NINE
003A # COLON
# Latin-1 Supplement
00A0 # NO-BREAK SPACE
# Combining Diacritical Marks
034F # COMBINING GRAPHEME JOINER
# General Punctuation
200E # LEFT-TO-RIGHT MARK
200F # RIGHT-TO-LEFT MARK
2010 # HYPHEN
2011 # NON-BREAKING HYPHEN
204F # REVERSED SEMICOLON
# Supplemental Punctuation
2E41 # REVERSED COMMA
"""),
# Aran - Aran (Nastaliq)
('Aran',
# Comment
"""
Hyphens are required for Urdu from the Arabic
Guillemets used for Persian according to Behdad
Other punctuation was in phase2 fonts, so presumably from Kamal.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
002C # COMMA
002E # FULL STOP
003A # COLON
# Latin-1 Supplement
00AB # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
00BB # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
# Arabic
061C # ARABIC LETTER MARK
# General Punctuation
2010 # HYPHEN
2011 # NON-BREAKING HYPHEN
# Arabic Presentation Forms-A
FDF4 # ARABIC LIGATURE MOHAMMAD ISOLATED FORM
"""),
# Armi - Imperial Aramaic
# Armn - Armenian
('Armn',
# Comment
"""
Characters referenced in Armenian encoding cross ref page
see http://www.unicode.org/L2/L2010/10354-n3924-armeternity.pdf
also see http://man7.org/linux/man-pages/man7/armscii-8.7.html
also see core specification.
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002D # HYPHEN-MINUS
002E # FULL STOP
# Latin-1 Supplement
00A0 # NO-BREAK SPACE
00A7 # SECTION SIGN
# Spacing Modifier Letters
02BB # MODIFIER LETTER TURNED COMMA
# General Punctuation
2010 # HYPHEN
2014 # EM DASH
2019 # RIGHT SINGLE QUOTATION MARK
2024 # ONE DOT LEADER
# Alphabetic Presentation Forms
FB13 # ARMENIAN SMALL LIGATURE MEN NOW
FB14 # ARMENIAN SMALL LIGATURE MEN ECH
FB15 # ARMENIAN SMALL LIGATURE MEN INI
FB16 # ARMENIAN SMALL LIGATURE VEW NOW
FB17 # ARMENIAN SMALL LIGATURE MEN XEH
"""),
# Avst - Avestan
('Avst',
# Comment
"""
From Core Specification and NamesList.txt
www.unicode.org/L2/L2007/07006r-n3197r-avestan.pdf
""",
# Data
"""
# Basic Latin
002E # FULL STOP
# Latin-1 Supplement
00B7 # MIDDLE DOT
# General Punctuation
200C # ZERO WIDTH NON-JOINER
# Supplemental Punctuation
2E30 # RING POINT
2E31 # WORD SEPARATOR MIDDLE DOT
"""),
# Bali - Balinese
# Bamu - Bamum
# Bass - Bassa Vah
('Bass',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
002C # COMMA
002E # FULL STOP
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
"""),
# Batk - Batak
# Beng - Bengali
# Bhks - Bhks (Bhaiksuki)
# Brah - Brahmi
# Brai - Braille
# Bugi - Buginese
# Buhd - Buhid
# CJK - (Bopo,Hang,Hani,Hans,Hant,Hira,Jpan,Kana,Kore)
# Cakm - Chakma
# Cans - Canadian Aboriginal
('Cans',
# Comment
"""
From core specification and web sites.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
"""),
# Cari - Carian
('Cari',
# Comment
"""
From core specification.
""",
# Data
"""
# Latin-1 Supplement
00B7 # MIDDLE DOT
# General Punctuation
205A # TWO DOT PUNCTUATION
205D # TRICOLON
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
"""),
# Cham - Cham
('Cham',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002D # HYPHEN-MINUS
003A # COLON
003F # QUESTION MARK
# General Punctuation
2010 # HYPHEN
"""),
# Cher - Cherokee
('Cher',
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2014/14064r-n4537r-cherokee.pdf section 8.
Core spec says 'uses latin punctuation', these are a subset of the latin-1
punct because the intent of listing them is to ensure that use in running
text works with the script.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0027 # APOSTROPHE
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
002F # SOLIDUS
003A # COLON
003B # SEMICOLON
003F # QUESTION MARK
005B # LEFT SQUARE BRACKET
005D # RIGHT SQUARE BRACKET
007E # TILDE
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0301 # COMBINING ACUTE ACCENT
0302 # COMBINING CIRCUMFLEX ACCENT
0304 # COMBINING MACRON
030B # COMBINING DOUBLE ACUTE ACCENT
030C # COMBINING CARON
0323 # COMBINING DOT BELOW
0324 # COMBINING DIAERESIS BELOW
0330 # COMBINING TILDE BELOW
0331 # COMBINING MACRON BELOW
# General Punctuation
2010 # HYPHEN
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
"""),
# Copt - Coptic
('Copt',
# Comment
"""
From Core specification and
http://std.dkuug.dk/JTC1/SC2/WG2/docs/n2636.pdf
""",
# Data
"""
# Basic Latin
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0301 # COMBINING ACUTE ACCENT
0302 # COMBINING CIRCUMFLEX ACCENT
0304 # COMBINING MACRON
0305 # COMBINING OVERLINE
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
033F # COMBINING DOUBLE OVERLINE
# Greek and Coptic
0374 # GREEK NUMERAL SIGN
0375 # GREEK LOWER NUMERAL SIGN
# General Punctuation
2019 # RIGHT SINGLE QUOTATION MARK
# Supplemental Punctuation
2E17 # DOUBLE OBLIQUE HYPHEN
# Combining Half Marks
FE24 # COMBINING MACRON LEFT HALF
FE25 # COMBINING MACRON RIGHT HALF
FE26 # COMBINING CONJOINING MACRON
"""),
# Cprt - Cypriot
# Deva - Devanagari
('Deva',
# Comment
"""
Email from Jelle, SHY was encoded as Macron by accident.
""",
# Data
"""
# Latin-1 Supplement
00AD # SOFT HYPHEN
"""),
# Dsrt - Deseret
# Dupl - Duployan shorthand (Duployan)
# Egyp - Egyptian hieroglyphs
# Elba - Elbasan
('Elba',
# Comment
"""
see http://www.unicode.org/L2/L2011/11050-n3985-elbasan.pdf
adds combining overbar and greek numerals for ones and tens, and
both stigma/digamma for 6.
""",
# Data
"""
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0305 # COMBINING OVERLINE
# Greek and Coptic
0391 # GREEK CAPITAL LETTER ALPHA
0392 # GREEK CAPITAL LETTER BETA
0393 # GREEK CAPITAL LETTER GAMMA
0394 # GREEK CAPITAL LETTER DELTA
0395 # GREEK CAPITAL LETTER EPSILON
0396 # GREEK CAPITAL LETTER ZETA
0397 # GREEK CAPITAL LETTER ETA
0398 # GREEK CAPITAL LETTER THETA
0399 # GREEK CAPITAL LETTER IOTA
039A # GREEK CAPITAL LETTER KAPPA
039B # GREEK CAPITAL LETTER LAMDA
039C # GREEK CAPITAL LETTER MU
039D # GREEK CAPITAL LETTER NU
039E # GREEK CAPITAL LETTER XI
039F # GREEK CAPITAL LETTER OMICRON
03A0 # GREEK CAPITAL LETTER PI
03DA # GREEK LETTER STIGMA
03DD # GREEK SMALL LETTER DIGAMMA
03DE # GREEK LETTER KOPPA
"""),
# Ethi - Ethiopic
('Ethi',
# Comment
"""
From core specification, also see
http://abyssiniagateway.net/fidel/l10n/
Recommends combining diaeresis 'for scholarly use', should look Ethiopian.
Also claims hyphen is not used, but a wikipedia page in Amharic does use
it, see
https://am.wikipedia.org/wiki/1_%E1%8A%A5%E1%88%BD%E1%88%98-%E1%8B%B3%E1%8C%8B%E1%8A%95
Western numerals and punctuation should look heavier to match the Ethiopic.
A keyboard standard is here:
See http://www.mcit.gov.et/documents/1268465/1282796/Keyboard+Layout+Standard/a8aa75ca-e125-4e25-872e-380e2a9b2313
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002B # PLUS SIGN
002E # FULL STOP
002F # SOLIDUS
003D # EQUALS SIGN
# Combining Diacritical Marks
0308 # COMBINING DIAERESIS
030E # COMBINING DOUBLE VERTICAL LINE ABOVE
# Mathematical Operators
22EE # VERTICAL ELLIPSIS
"""),
# Geor - Georgian
('Geor',
# Comment
"""
From core specification (references unspecified additional latin punct), also
see example news article: http://www.civil.ge/geo/article.php?id=29970
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0025 # PERCENT SIGN
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Latin-1 Supplement
00A0 # NO-BREAK SPACE
00B7 # MIDDLE DOT
# General Punctuation
2014 # EM DASH
2056 # THREE DOT PUNCTUATION
2057 # QUADRUPLE PRIME
2058 # FOUR DOT PUNCTUATION
2059 # FIVE DOT PUNCTUATION
205A # TWO DOT PUNCTUATION
205B # FOUR DOT MARK
205C # DOTTED CROSS
205D # TRICOLON
205E # VERTICAL FOUR DOTS
20BE # LARI SIGN
# Supplemental Punctuation
2E2A # TWO DOTS OVER ONE DOT PUNCTUATION
2E2B # ONE DOT OVER TWO DOTS PUNCTUATION
2E2C # SQUARED FOUR DOT PUNCTUATION
2E2D # FIVE DOT MARK
2E31 # WORD SEPARATOR MIDDLE DOT
"""),
# Glag - Glagolitic
('Glag',
# Comment
"""
See core specification. It refers to 'numerous diacritical marks', these
are not listed.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
002C # COMMA
002E # FULL STOP
003B # SEMICOLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0303 # COMBINING TILDE
0305 # COMBINING OVERLINE
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
2056 # THREE DOT PUNCTUATION
2058 # FOUR DOT PUNCTUATION
2059 # FIVE DOT PUNCTUATION
"""),
# Goth - Gothic
('Goth',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0304 # COMBINING MACRON
0305 # COMBINING OVERLINE
0308 # COMBINING DIAERESIS
0331 # COMBINING MACRON BELOW
"""),
# Gran - Grantha
('Gran',
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0951 # DEVANAGARI STRESS SIGN UDATTA
0952 # DEVANAGARI STRESS SIGN ANUDATTA
# Vedic Extensions
1CD0 # VEDIC TONE KARSHANA
1CD2 # VEDIC TONE PRENKHA
1CD3 # VEDIC SIGN NIHSHVASA
1CF2 # VEDIC SIGN ARDHAVISARGA
1CF3 # VEDIC SIGN ROTATED ARDHAVISARGA
1CF4 # VEDIC TONE CANDRA ABOVE
1CF8 # VEDIC TONE RING ABOVE
1CF9 # VEDIC TONE DOUBLE RING ABOVE
# Combining Diacritical Marks for Symbols
20F0 # COMBINING ASTERISK ABOVE
"""),
# Gujr - Gujarati
# Guru - Gurmukhi
('Guru',
# Comment
"""
From core specification.
""",
# Data
"""
# Miscellaneous Symbols
262C # ADI SHAKTI
"""),
# Hano - Hanunoo
# Hatr - Hatr (Hatran)
('Hatr',
# Comment
"""
See http://www.unicode.org/L2/L2012/12312-n4324-hatran.pdf (most info, but
not latest assignment, which doesn't have all digits shown here)
single and double vertical line, also ZWNJ in case ligatures need breaking
might want to ligate hatran digit 1 forms 11 (2), 111 (3), 1111 (4) to
look as the suggested (dropped) digits were represented in the doc.
""",
# Data
"""
# Basic Latin
007C # VERTICAL LINE
# General Punctuation
200C # ZERO WIDTH NON-JOINER
2016 # DOUBLE VERTICAL LINE
"""),
# Hebr - Hebrew
('Hebr',
# Comment
"""
From core specification, adds currency.
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
# Combining Diacritical Marks
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
034F # COMBINING GRAPHEME JOINER
# General Punctuation
200C # ZERO WIDTH NON-JOINER
200D # ZERO WIDTH JOINER
200E # LEFT-TO-RIGHT MARK
200F # RIGHT-TO-LEFT MARK
# Currency Symbols
20AA # NEW SHEQEL SIGN
# Letterlike Symbols
2135 # ALEF SYMBOL
2136 # BET SYMBOL
2137 # GIMEL SYMBOL
2138 # DALET SYMBOL
"""),
# Hluw - Anatolian Hieroglyphs
('Hluw',
# Comment
"""
see http://www.unicode.org/L2/L2012/12213-n4282-anatolian.pdf
""",
# Data
"""
# General Punctuation
200B # ZERO WIDTH SPACE
"""),
# Hmng - Pahawh Hmong
# Hrkt - Japanese syllabaries (Katakana Or Hiragana)
# Hung - Old Hungarian
('Hung',
# Comment
"""
see http://www.unicode.org/L2/L2012/12168r-n4268r-oldhungarian.pdf
letters with LTR override mirror reverse (!) "which has to be handled by
the rendering engine"
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
003A # COLON
# General Punctuation
200D # ZERO WIDTH JOINER
2010 # HYPHEN
201F # DOUBLE HIGH-REVERSED-9 QUOTATION MARK
204F # REVERSED SEMICOLON
205A # TWO DOT PUNCTUATION
205D # TRICOLON
205E # VERTICAL FOUR DOTS
# Supplemental Punctuation
2E2E # REVERSED QUESTION MARK
2E31 # WORD SEPARATOR MIDDLE DOT
2E41 # REVERSED COMMA
2E42 # DOUBLE LOW-REVERSED-9 QUOTATION MARK
"""),
# Ital - Old Italic
# Java - Javanese
# Kali - Kayah Li
('Kali',
# Comment
"""
From core specification, also see
http://www.unicode.org/L2/L2006/06073-n3038r-kayahli.pdf
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
003F # QUESTION MARK
# General Punctuation
2010 # HYPHEN
"""),
# Khar - Kharoshthi
('Khar',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002D # HYPHEN-MINUS
# General Punctuation
2010 # HYPHEN
"""),
# Khmr - Khmer
('Khmr',
# Comment
"""
Latin punct see web sites
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
"""),
# Khoj - Khojki
('Khoj',
# Comment
"""
From core specification, also see
http://www.unicode.org/L2/L2011/11021-khojki.pdf
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
003B # SEMICOLON
# General Punctuation
2013 # EN DASH
2026 # HORIZONTAL ELLIPSIS
"""),
# Knda - Kannada
# Kthi - Kaithi
('Kthi',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002B # PLUS SIGN
002D # HYPHEN-MINUS
# General Punctuation
2010 # HYPHEN
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
"""),
# LGC - (Latn,Grek,Cyrl)
('LGC',
# Comment
"""
FE00 is for variant zero.
""",
# Data
"""
# Spacing Modifier Letters
02EA # MODIFIER LETTER YIN DEPARTING TONE MARK
02EB # MODIFIER LETTER YANG DEPARTING TONE MARK
# Letterlike Symbols
2100 # ACCOUNT OF
2101 # ADDRESSED TO THE SUBJECT
2103 # DEGREE CELSIUS
2105 # CARE OF
2106 # CADA UNA
2109 # DEGREE FAHRENHEIT
2113 # SCRIPT SMALL L
2116 # NUMERO SIGN
2117 # SOUND RECORDING COPYRIGHT
211E # PRESCRIPTION TAKE
2120 # SERVICE MARK
2121 # TELEPHONE SIGN
2122 # TRADE MARK SIGN
2127 # INVERTED OHM SIGN
2129 # TURNED GREEK SMALL LETTER IOTA
212E # ESTIMATED SYMBOL
213B # FACSIMILE SIGN
214B # TURNED AMPERSAND
214D # AKTIESELSKAB
# Number Forms
2150 # VULGAR FRACTION ONE SEVENTH
2151 # VULGAR FRACTION ONE NINTH
2152 # VULGAR FRACTION ONE TENTH
2153 # VULGAR FRACTION ONE THIRD
2154 # VULGAR FRACTION TWO THIRDS
2155 # VULGAR FRACTION ONE FIFTH
2156 # VULGAR FRACTION TWO FIFTHS
2157 # VULGAR FRACTION THREE FIFTHS
2158 # VULGAR FRACTION FOUR FIFTHS
2159 # VULGAR FRACTION ONE SIXTH
215A # VULGAR FRACTION FIVE SIXTHS
215B # VULGAR FRACTION ONE EIGHTH
215C # VULGAR FRACTION THREE EIGHTHS
215D # VULGAR FRACTION FIVE EIGHTHS
215E # VULGAR FRACTION SEVEN EIGHTHS
215F # FRACTION NUMERATOR ONE
2184 # LATIN SMALL LETTER REVERSED C
2189 # VULGAR FRACTION ZERO THIRDS
# Variation Selectors
FE00 # VARIATION SELECTOR-1
# Specials
FFFC # OBJECT REPLACEMENT CHARACTER
FFFD # REPLACEMENT CHARACTER
"""),
# Lana - Lanna (Tai Tham)
# Laoo - Lao
('Laoo',
# Comment
"""
For latin punct use see web sites, e.g. nuol.edu.la
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
003A # COLON
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
# Currency Symbols
20AD # KIP SIGN
"""),
# Lepc - Lepcha
('Lepc',
# Comment
"""
From core specification, only the specifically mentioned punct.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003F # QUESTION MARK
"""),
# Limb - Limbu
('Limb',
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0965 # DEVANAGARI DOUBLE DANDA
"""),
# Lina - Linear A
# Linb - Linear B
# Lisu - Fraser (Lisu)
('Lisu',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002D # HYPHEN-MINUS
003A # COLON
003B # SEMICOLON
003F # QUESTION MARK
# Spacing Modifier Letters
02BC # MODIFIER LETTER APOSTROPHE
02CD # MODIFIER LETTER LOW MACRON
# General Punctuation
2010 # HYPHEN
2026 # HORIZONTAL ELLIPSIS
# CJK Symbols and Punctuation
300A # LEFT DOUBLE ANGLE BRACKET
300B # RIGHT DOUBLE ANGLE BRACKET
"""),
# Lyci - Lycian
('Lyci',
# Comment
"""
From core specification.
""",
# Data
"""
# General Punctuation
205A # TWO DOT PUNCTUATION
"""),
# Lydi - Lydian
('Lydi',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
"""),
# MUSIC - MUSIC
('MUSIC',
# Comment
"""
Characters not in standard music blocks.
""",
# Data
"""
# Miscellaneous Symbols
2669 # QUARTER NOTE
266A # EIGHTH NOTE
266B # BEAMED EIGHTH NOTES
266C # BEAMED SIXTEENTH NOTES
266D # MUSIC FLAT SIGN
266E # MUSIC NATURAL SIGN
266F # MUSIC SHARP SIGN
"""),
# Mahj - Mahajani
('Mahj',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002D # HYPHEN-MINUS
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
# General Punctuation
2013 # EN DASH
"""),
# Mand - Mandaean (Mandaic)
('Mand',
# Comment
"""
From core specification.
""",
# Data
"""
# Arabic
0640 # ARABIC TATWEEL
"""),
# Mani - Manichaean
# Marc - Marc (Marchen)
# Mend - Mende (Mende Kikakui)
# Merc - Meroitic Cursive
('Merc',
# Comment
"""
From core specification.
also see http://www.unicode.org/L2/L2009/09188r-n3646-meroitic.pdf
""",
# Data
"""
# Basic Latin
003A # COLON
# General Punctuation
2026 # HORIZONTAL ELLIPSIS
205D # TRICOLON
"""),
# Mero - Meroitic (Meroitic Hieroglyphs)
# Mlym - Malayalam
# Modi - Modi
('Modi',
# Comment
"""
From core specification, also see
http://www.unicode.org/L2/L2011/11212r2-n4034-modi.pdf
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003B # SEMICOLON
"""),
# Mong - Mongolian
('Mong',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
003F # QUESTION MARK
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
2048 # QUESTION EXCLAMATION MARK
2049 # EXCLAMATION QUESTION MARK
"""),
# Mroo - Mro
# Mtei - Meitei Mayek (Meetei Mayek)
# Mult - Mult (Multani)
# Mymr - Myanmar
('Mymr',
# Comment
"""
From core specification.
""",
# Data
"""
# General Punctuation
200B # ZERO WIDTH SPACE
"""),
# Narb - Old North Arabian
# Nbat - Nabataean
# Newa - Newa
# Nkoo - N'Ko (N'Ko)
('Nkoo',
# Comment
"""
From core specification.
""",
# Data
"""
# Arabic
060C # ARABIC COMMA
061B # ARABIC SEMICOLON
061F # ARABIC QUESTION MARK
# Supplemental Punctuation
2E1C # LEFT LOW PARAPHRASE BRACKET
2E1D # RIGHT LOW PARAPHRASE BRACKET
# Arabic Presentation Forms-A
FD3E # ORNATE LEFT PARENTHESIS
FD3F # ORNATE RIGHT PARENTHESIS
"""),
# Ogam - Ogham
# Olck - Ol Chiki
('Olck',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
002C # COMMA
003F # QUESTION MARK
# General Punctuation
2014 # EM DASH
2018 # LEFT SINGLE QUOTATION MARK
2019 # RIGHT SINGLE QUOTATION MARK
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
"""),
# Orkh - Orkhon (Old Turkic)
('Orkh',
# Comment
"""
From core specification.
""",
# Data
"""
# General Punctuation
205A # TWO DOT PUNCTUATION
# Supplemental Punctuation
2E30 # RING POINT
"""),
# Orya - Oriya
# Osge - Osge (Osage)
# Osma - Osmanya
# Palm - Palmyrene
# Pauc - Pau Cin Hau
('Pauc',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002E # FULL STOP
"""),
# Perm - Old Permic
('Perm',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0027 # APOSTROPHE
003A # COLON
# Latin-1 Supplement
00B7 # MIDDLE DOT
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0306 # COMBINING BREVE
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
0313 # COMBINING COMMA ABOVE
# Cyrillic
0483 # COMBINING CYRILLIC TITLO
# Combining Diacritical Marks for Symbols
20DB # COMBINING THREE DOTS ABOVE
"""),
# Phag - Phags-pa
# Phli - Inscriptional Pahlavi
# Phlp - Psalter Pahlavi
('Phlp',
# Comment
"""
from core specification.
""",
# Data
"""
# Arabic
0640 # ARABIC TATWEEL
"""),
# Phnx - Phoenician
# Plrd - Pollard Phonetic (Miao)
# Prti - Inscriptional Parthian
# Rjng - Rejang
('Rjng',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003A # COLON
"""),
# Runr - Runic
# Samr - Samaritan
('Samr',
# Comment
"""
From core specification.
""",
# Data
"""
# Supplemental Punctuation
2E31 # WORD SEPARATOR MIDDLE DOT
"""),
# Sarb - Old South Arabian
# Saur - Saurashtra
('Saur',
# Comment
"""
From core specification, only the specifically mentioned punct.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003F # QUESTION MARK
"""),
# Sgnw - SignWriting
# Shaw - Shavian
('Shaw',
# Comment
"""
From core specification.
""",
# Data
"""
# Latin-1 Supplement
00B7 # MIDDLE DOT
"""),
# Shrd - Sharada
# Sidd - Siddham
# Sind - Khudawadi
('Sind',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
# General Punctuation
2013 # EN DASH
2014 # EM DASH
"""),
# Sinh - Sinhala
('Sinh',
# Comment
"""
From core specification, plus unspecified latin punctuation seen on web
sites.
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
# Devanagari
0964 # DEVANAGARI DANDA
"""),
# Sora - Sora Sompeng
('Sora',
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2009/09189r-n3647r-sora-sompeng.pdf
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
003B # SEMICOLON
# General Punctuation
2010 # HYPHEN
"""),
# Sund - Sundanese
('Sund',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
0022 # QUOTATION MARK
002D # HYPHEN-MINUS
003C # LESS-THAN SIGN
003E # GREATER-THAN SIGN
003F # QUESTION MARK
# General Punctuation
2010 # HYPHEN
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
"""),
# Sylo - Syloti Nagri
('Sylo',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003A # COLON
003B # SEMICOLON
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
# General Punctuation
2055 # FLOWER PUNCTUATION MARK
"""),
# Syrc - Syriac
('Syrc',
# Comment
"""
From core specification. In it, the reference to 'arabic harakat' used with
Garshuni is based on the Harakat section of the wikipedia page on Arabic
diacritics.
""",
# Data
"""
# Combining Diacritical Marks
0303 # COMBINING TILDE
0304 # COMBINING MACRON
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
030A # COMBINING RING ABOVE
0320 # COMBINING MINUS SIGN BELOW
0323 # COMBINING DOT BELOW
0324 # COMBINING DIAERESIS BELOW
0325 # COMBINING RING BELOW
032D # COMBINING CIRCUMFLEX ACCENT BELOW
032E # COMBINING BREVE BELOW
0330 # COMBINING TILDE BELOW
# Arabic
060C # ARABIC COMMA
061B # ARABIC SEMICOLON
061F # ARABIC QUESTION MARK
0640 # ARABIC TATWEEL
064E # ARABIC FATHA
064F # ARABIC DAMMA
0650 # ARABIC KASRA
0651 # ARABIC SHADDA
0652 # ARABIC SUKUN
0653 # ARABIC MADDAH ABOVE
0670 # ARABIC LETTER SUPERSCRIPT ALEF
0671 # ARABIC LETTER ALEF WASLA
# General Punctuation
200C # ZERO WIDTH NON-JOINER
"""),
# Tagb - Tagbanwa
# Takr - Takri
('Takr',
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
"""),
# Tale - Tai Le
('Tale',
# Comment
"""
From core specification & http://www.unicode.org/L2/L2001/01369-n2372.pdf
Myanmar digits have glyphic variants according to the spec.
""",
# Data
"""
# Basic Latin
002C # COMMA
002E # FULL STOP
003A # COLON
003F # QUESTION MARK
# Combining Diacritical Marks
0300 # COMBINING GRAVE ACCENT
0301 # COMBINING ACUTE ACCENT
0307 # COMBINING DOT ABOVE
0308 # COMBINING DIAERESIS
030C # COMBINING CARON
# Myanmar
1040 # MYANMAR DIGIT ZERO
1041 # MYANMAR DIGIT ONE
1042 # MYANMAR DIGIT TWO
1043 # MYANMAR DIGIT THREE
1044 # MYANMAR DIGIT FOUR
1045 # MYANMAR DIGIT FIVE
1046 # MYANMAR DIGIT SIX
1047 # MYANMAR DIGIT SEVEN
1048 # MYANMAR DIGIT EIGHT
1049 # MYANMAR DIGIT NINE
# General Punctuation
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
# CJK Symbols and Punctuation
3002 # IDEOGRAPHIC FULL STOP
"""),
# Talu - New Tai Lue
# Taml - Tamil
('Taml',
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2010/10407-ext-tamil-follow2.pdf
""",
# Data
"""
# Latin-1 Supplement
00B2 # SUPERSCRIPT TWO
00B3 # SUPERSCRIPT THREE
# Superscripts and Subscripts
2074 # SUPERSCRIPT FOUR
2082 # SUBSCRIPT TWO
2083 # SUBSCRIPT THREE
2084 # SUBSCRIPT FOUR
"""),
# Tang - Tangut
# Tavt - Tai Viet
('Tavt',
# Comment
"""
Used in SIL fonts.
""",
# Data
"""
# Latin Extended-D
A78B # LATIN CAPITAL LETTER SALTILLO
A78C # LATIN SMALL LETTER SALTILLO
"""),
# Telu - Telugu
# Tfng - Tifinagh
('Tfng',
# Comment
"""
From core specification.
""",
# Data
"""
# Combining Diacritical Marks
0302 # COMBINING CIRCUMFLEX ACCENT
0304 # COMBINING MACRON
0307 # COMBINING DOT ABOVE
0309 # COMBINING HOOK ABOVE
# General Punctuation
200D # ZERO WIDTH JOINER
"""),
# Tglg - Tagalog
# Thaa - Thaana
('Thaa',
# Comment
"""
From core specification, parens from text sample. Probably other punct
as well but spec does not list.
""",
# Data
"""
# Basic Latin
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002E # FULL STOP
# Arabic
060C # ARABIC COMMA
061B # ARABIC SEMICOLON
061F # ARABIC QUESTION MARK
"""),
# Thai - Thai
('Thai',
# Comment
"""
From core specification and
http://www.unicode.org/L2/L2010/10451-patani-proposal.pdf
for latin punct see web sites e.g. pandip.com, sanook.com
Bhat already here, or should be
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0022 # QUOTATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002E # FULL STOP
003A # COLON
003F # QUESTION MARK
# Spacing Modifier Letters
02BC # MODIFIER LETTER APOSTROPHE
02D7 # MODIFIER LETTER MINUS SIGN
# Combining Diacritical Marks
0303 # COMBINING TILDE
0331 # COMBINING MACRON BELOW
# General Punctuation
200B # ZERO WIDTH SPACE
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
2026 # HORIZONTAL ELLIPSIS
"""),
# Tibt - Tibetan
('Tibt',
# Comment
"""
Wheel of Dharma from core specification, not sure of source for vertical
line.
""",
# Data
"""
# Basic Latin
007C # VERTICAL LINE
# Miscellaneous Symbols
2638 # WHEEL OF DHARMA
"""),
# Tirh - Tirhuta
('Tirh',
# Comment
"""
From core specification.
""",
# Data
"""
# Devanagari
0964 # DEVANAGARI DANDA
0965 # DEVANAGARI DOUBLE DANDA
"""),
# Ugar - Ugaritic
# Vaii - Vai
('Vaii',
# Comment
"""
From core specification.
""",
# Data
"""
# Basic Latin
002C # COMMA
002D # HYPHEN-MINUS
"""),
# Wara - Varang Kshiti (Warang Citi)
('Wara',
# Comment
"""
"Uses latin punctuation," so guess based on sample text from
proposal doc, see
http://www.unicode.org/L2/L2012/12118-n4259-warang-citi.pdf
""",
# Data
"""
# Basic Latin
0021 # EXCLAMATION MARK
0028 # LEFT PARENTHESIS
0029 # RIGHT PARENTHESIS
002C # COMMA
002D # HYPHEN-MINUS
002E # FULL STOP
003A # COLON
003B # SEMICOLON
003F # QUESTION MARK
# General Punctuation
2013 # EN DASH
2014 # EM DASH
201C # LEFT DOUBLE QUOTATION MARK
201D # RIGHT DOUBLE QUOTATION MARK
"""),
# Xpeo - Old Persian
# Xsux - Sumero-Akkadian Cuneiform (Cuneiform)
# Yiii - Yi
('Yiii',
# Comment
"""
From core specification.
""",
# Data
"""
# CJK Symbols and Punctuation
3001 # IDEOGRAPHIC COMMA
3002 # IDEOGRAPHIC FULL STOP
"""),
]
# This is a utility function that parses the _script_required data
# and spits it out again in the above format. When editing the
# above data, just type in the hex values, then run this to regenerate
# the source in sorted order with block labels and codepoint names.
def _regen_script_required():
  """Rerun after editing script required to check/reformat.

  Parses the _SCRIPT_REQUIRED data and prints it to stdout in the same
  source format, sorted by script, with block labels and codepoint names
  regenerated.  Paste the output back over the _SCRIPT_REQUIRED literal.
  """
  # script code -> (comment, data) for lookup while emitting.
  script_to_comment_and_data = {
      script: (comment, data)
      for script, comment, data in _SCRIPT_REQUIRED
  }
  scripts = set(unicode_data.all_scripts())
  # Collapse merged source scripts into their merge target.
  for to_script, from_scripts in _MERGED_SCRIPTS_BY_TARGET.iteritems():
    scripts.add(to_script)
    scripts -= set(from_scripts)
  # keep extra script data, e.g. 'Aran'
  scripts.update(set(script_to_comment_and_data.keys()))
  # Never emit entries for Inherited, Common, or Unknown.
  scripts -= set(['Zinh', 'Zyyy', 'Zzzz'])
  for script in sorted(scripts):
    if script in _MERGED_SCRIPTS_BY_TARGET:
      script_name = '(%s)' % ','.join(_MERGED_SCRIPTS_BY_TARGET[script])
    else:
      script_name = cldr_data.get_english_script_name(script)
      try:
        unicode_script_name = unicode_data.human_readable_script_name(script)
        if script_name.lower() != unicode_script_name.lower():
          script_name += ' (%s)' % unicode_script_name
      except KeyError:
        pass
      # ASCII-fy right single quotes so the output is plain source text.
      script_name = script_name.replace(unichr(0x2019), "'")
    print ' # %s - %s' % (script, script_name)
    if script in script_to_comment_and_data:
      print " ('%s'," % script
      lines = []
      comment, data = script_to_comment_and_data[script]
      lines.append(' # Comment')
      lines.append('"""')
      for line in comment.strip().splitlines():
        lines.append(line.strip())
      lines.append('""",')
      lines.append('# Data')
      lines.append('"""')
      cps = tool_utils.parse_int_ranges(data)
      block = None
      # Emit a block-name comment each time we cross into a new block.
      for cp in sorted(cps):
        cp_block = unicode_data.block(cp)
        if cp_block != block:
          block = cp_block
          lines.append('# ' + block)
        cp_name = unicode_data.name(cp, '<unnamed>')
        lines.append('%04X # %s' % (cp, cp_name))
      lines.append('"""),')
      print '\n '.join(lines)
      print
def _assign_script_required(cmap_ops):
  """Add the hand-curated per-script extras from _SCRIPT_REQUIRED."""
  for entry in _SCRIPT_REQUIRED:
    script = entry[0]
    range_text = entry[2]
    cmap_ops.phase('assign script required for ' + script)
    cmap_ops.add_all(tool_utils.parse_int_ranges(range_text), script)
def _assign_script_special_chars(cmap_ops):
  """Add the per-script special characters from opentype_data."""
  cmap_ops.phase('assign special chars')
  special = opentype_data.SPECIAL_CHARACTERS_NEEDED
  for script in special:
    cmap_ops.add_all(frozenset(special[script]), script)
def _assign_legacy_phase2(cmap_ops):
  """Assign legacy chars in some scripts, excluding some blocks.

  Reads the phase-2 cmap file and adds any characters a phase-2 font had
  that the current assignment lacks, minus per-script exclusions, a few
  default control/space chars, and some ignored scripts.
  """
  legacy_data = cmap_data.read_cmap_data_file('data/noto_cmap_phase2.xml')
  legacy_map = cmap_data.create_map_from_table(legacy_data.table)
  legacy_script_to_chars = {
      script: tool_utils.parse_int_ranges(row.ranges)
      for script, row in legacy_map.iteritems()}
  # The default is to include all legacy characters, except for the chars
  # listed for these scripts, for some default chars, and for some scripts.
  # Find out why these were included in the phase two fonts.
  # This excludes lots of punctuation and digits from Cham, Khmer, and Lao
  # but leaves some common latin characters like quotes, parens, comma/period,
  # and so on.
  exclude_script_ranges = {
      'Cham': '23-26 2A-2B 30-39 3C-3E 40 5B-60 7B-7E 037E',
      'Copt': '0323 0361 1dcd 25cc',
      'Deva': '00AF', # Jelle says this was encoded by accident, should be 00AD
      'Kthi': '0030-0039',
      'Khmr': '23-26 2A-2B 30-39 3C-3E 40 5B-60 7B-7E 037E',
      'LGC': '03E2',
      'Lana': '2219',
      'Laoo': '23-26 2A-2B 30-39 3C-3E 40 5B-60 7B-7E 037E',
      'Limb': '0964', # I think double-danda was intended
      'Mlym': '0307 0323',
      'Syrc': '250C 2510', # box drawing?
      'Tavt': 'A78C',
  }
  # mono temporarily
  ignore_legacy = frozenset('LGC Zsye Zsym MONO'.split())
  # NUL, CR, space, NBSP, BOM are handled separately; never copy them here.
  ignore_cps = frozenset([0x0, 0xd, 0x20, 0xa0, 0xfeff])
  cmap_ops.phase('assign legacy phase 2')
  script_to_chars = cmap_ops.create_script_to_chars()
  for script in sorted(legacy_script_to_chars):
    if script not in script_to_chars:
      cmap_ops.log('skipping script %s' % script)
      continue
    if script in ignore_legacy:
      cmap_ops.log('ignoring %s' % script)
      continue
    script_chars = script_to_chars[script]
    legacy_chars = legacy_script_to_chars[script]
    missing_legacy = set(legacy_chars) - set(script_chars) - ignore_cps
    if script in exclude_script_ranges:
      ranges = exclude_script_ranges[script]
      missing_legacy -= set(tool_utils.parse_int_ranges(ranges))
    if missing_legacy:
      cmap_ops.phase('assign legacy %s' % script)
      cmap_ops.add_all(missing_legacy, script)
def _check_CJK():
  # not used
  # NOTE(review): dead code and not callable as-is: cmap_ops,
  # legacy_script_to_chars, and script_to_chars are free names with no
  # definition in this scope (it appears to have been lifted out of
  # _assign_legacy_phase2).  Calling it would raise NameError.
  # Kept for reference only.
  # check CJK
  cmap_ops.log('check cjk legacy')
  legacy_cjk_chars = set()
  for script in _MERGED_SCRIPTS_BY_TARGET['CJK']:
    if script in legacy_script_to_chars:
      legacy_cjk_chars |= legacy_script_to_chars[script]
  cjk_chars = script_to_chars['CJK']
  not_in_legacy = cjk_chars - legacy_cjk_chars
  # ignore plane 2 and above
  not_in_legacy -= set(range(0x20000, 0x120000))
  if not_in_legacy:
    print 'not in legacy (%d):' % len(not_in_legacy)
    compare_cmap_data._print_detailed(not_in_legacy)
  not_in_new = legacy_cjk_chars - cjk_chars
  if not_in_new:
    print 'not in new (%d):' % len(not_in_new)
    compare_cmap_data._print_detailed(not_in_new)
def _assign_bidi_mirroring(cmap_ops):
  """Ensure that if a bidi mirroring char is in a font, its mirrored char
  is too.

  For each script cmap, finds its characters with Bidi_Mirrored=Y and
  adds each one's Bidi_Mirroring_Glyph partner to the same script if it
  is not already present.
  """
  cmap_ops.phase('bidi mirroring')
  script_to_chars = cmap_ops.create_script_to_chars()
  mirrored = unicode_data.mirrored_chars()
  for script, cps in sorted(script_to_chars.iteritems()):
    mirrored_in_script = cps & mirrored
    if not mirrored_in_script:
      continue
    sibs = set(unicode_data.bidi_mirroring_glyph(cp)
               for cp in mirrored_in_script)
    # Some mirrored characters have no mirroring glyph (BidiMirroring.txt
    # defines no pair for them), in which case bidi_mirroring_glyph
    # returns None; see the same guard in add_mirrored in
    # _assign_programming_lang_symbols.  Don't add None as a codepoint.
    sibs.discard(None)
    missing_sibs = sibs - mirrored_in_script
    if missing_sibs:
      cmap_ops.log('adding %d missing bidi chars' % len(missing_sibs))
      cmap_ops.add_all(missing_sibs, script)
def _unassign_lgc_from_symbols(cmap_ops):
  """Drop characters already covered by LGC from Zsym and SYM2."""
  cmap_ops.phase('unassign lgc from symbols')
  lgc_chars = frozenset(cmap_ops.script_chars('LGC'))
  for symbol_script in ('Zsym', 'SYM2'):
    overlap = frozenset(cmap_ops.script_chars(symbol_script)) & lgc_chars
    cmap_ops.remove_all(overlap, symbol_script)
def _assign_programming_lang_symbols(cmap_ops):
  """Assign characters used in programming languages, which generally
  should be in MONO and in some cases need to be compatible with math
  in general."""
  def add_mirrored(cps):
    """Extend cps in place with each mirrored char's mirroring partner."""
    mirrored_cps = set()
    for cp in cps:
      if unicode_data.mirrored(cp):
        mirrored_glyph = unicode_data.bidi_mirroring_glyph(cp)
        # Not every mirrored char has a mirroring glyph defined.
        if mirrored_glyph != None:
          mirrored_cps.add(mirrored_glyph)
    cps |= (mirrored_cps)
  # some characters we want to preserve in symbols despite adding them
  # to math.
  preserve_symbols_cps = tool_utils.parse_int_ranges(
      """
      2190 # LEFTWARDS ARROW
      2191 # UPWARDS ARROW
      2192 # RIGHTWARDS ARROW
      2193 # DOWNWARDS ARROW
      2194 # LEFT RIGHT ARROW
      2195 # UP DOWN ARROW
      2474 # PARENTHESIZED DIGIT ONE
      2475 # PARENTHESIZED DIGIT TWO
      266d # MUSIC FLAT SIGN
      266e # MUSIC NATURAL SIGN
      266f # MUSIC SHARP SIGN
      27f6 # LONG RIGHTWARDS ARROW
      """)
  # similarly, preserve some in symbols2
  preserve_symbols2_cps = tool_utils.parse_int_ranges(
      """
      21e8 # RIGHTWARDS WHITE ARROW
      2219 # BULLET OPERATOR
      2299 # CIRCLED DOT OPERATOR
      25a1 # WHITE SQUARE
      25b7 # WHITE RIGHT-POINTING TRIANGLE
      25bb # WHITE RIGHT-POINTING POINTER
      25c2 # BLACK LEFT-POINTING SMALL TRIANGLE
      25c3 # WHITE LEFT-POINTING SMALL TRIANGLE
      25c5 # WHITE LEFT-POINTING POINTER
      25c7 # WHITE DIAMOND
      25c8 # WHITE DIAMOND CONTAINING BLACK SMALL DIAMOND
      25cb # WHITE CIRCLE
      2736 # SIX POINTED BLACK STAR
      """)
  cmap_ops.phase('programming - haskell')
  # see noto-fonts#669 agda non-ascii character list
  haskell_cps = tool_utils.parse_int_ranges(
      """
      00a0 00ac 00b2 00b7 00b9 00bd 00d7 00e0 00e9 00f3 00f6-00f7 019b
      02b0 02b3 02e1-02e2 0307 0393 0398 03a0 03a3 03b5 03b7 03bb-03be
      03c1 03c3-03c4 03c6 03c8-03c9 2022 2026 2032-2033 203c 203f
      2045-2046 2070 207a-207b 207f-2089 2113 2115 211a 2124 2190-2194
      219d-219e 21a0 21a2-21a3 21a6 21d0-21d4 21db 21e8 2200-2201
      2203-2205 2208-2209 220b 220e 2218-2219 221e 2223 2227-222a
      2236-2238 223c 2241 2243 2245 2247-224b 2254 2257 225f 2261-2262
      2264-2265 226c 226e-2273 2275 227a-227b 2286-2288 228e 2291-229c
      22a4-22a5 22b4 22b8 22c2-22c3 22c6 22c9-22ca 22ce 22d0 22e2
      2308-230b 236e 2474-2475 25a1 25b7 25bb 25c2-25c3 25c5 25c7-25c8
      266d 266f 2736 27e6-27eb 27f6 2987-2988 2a00 2a05-2a06 ff5b ff5d
      """)
  # add extra not in the set above:
  # (from github.com/adobe-fonts/source-code-pro/issues/114)
  haskell_cps |= tool_utils.parse_int_ranges(
      """2202 2210 2220 2234 2235 2284 2285 2289""")
  # see comment from joeyaiello on noto-fonts/issues/669
  # others mentioned in that comment are already in haskell
  haskell_cps.add(0x2195)
  # add mirrored cps to this set
  add_mirrored(haskell_cps)
  # add 'leftwards' variants (not mirrored) and a few other variants
  # because it seems odd to split these groups even if there's no use for
  # them in haskell.
  leftwards_variants = tool_utils.parse_int_ranges(
      """
      # Arrows
      219c # LEFTWARDS WAVE ARROW (ref 219d)
      21a4 # LEFTWARDS ARROW FROM BAR (ref 21a6)
      21da # LEFTWARDS TRIPLE ARROW (ref 21db)
      21e6 # LEFTWARDS WHITE ARROW (ref 21e8)
      # Miscellaneous Technical
      2310 # REVERSED NOT SIGN (ref 00ac)
      2319 # TURNED NOT SIGN (ref 00ac)
      # Miscellaneous Symbols
      266e # MUSIC NATURAL SIGN (ref 266d)
      # Supplemental Arrows-A
      27f5 # LONG LEFTWARDS ARROW (ref 27f6)
      """)
  haskell_cps |= leftwards_variants
  # Put the haskell set in math and mono, and take it out of the symbols
  # fonts except for the preserve sets above.
  cmap_ops.add_all_to_all(haskell_cps, ['Zmth', 'MONO'])
  cmap_ops.remove_all(haskell_cps - preserve_symbols_cps, 'Zsym')
  cmap_ops.remove_all(haskell_cps - preserve_symbols2_cps, 'SYM2')
  cmap_ops.phase('programming - APL')
  # For the below APL sets, see noto-fonts#751
  apl_cps = tool_utils.parse_int_ranges(
      """
      0021 0024 0027-0029 002b-002c 002e-002f 003a-003f 005b-005d 005f
      007b 007d 00a8 00af 00d7 00f7 2190-2193 2205-2207 220a 2212 2218
      2223 2227-222a 2235 223c 2260-2262 2264-2265 2282-2283 2286-2287
      2296 22a2-22a5 22c4 22c6 2308 230a 2336-237a 2395 25cb
      """)
  # do not use circled uppercase letters as a substitute for APL underscored
  # letters. Dyalog APL does this and hacks a font to make them to render as
  # underscored. Also apl385 does this and renders these as underscored. This
  # is contrary to Unicode (which should just have gone ahead and encoded these,
  # but I guess balked since they were already kind of deprecated by that time).
  # apl_cps |= tool_utils.parse_int_ranges('24B6-24CF')
  # additionally requested relational algebra symbols
  apl_cps |= tool_utils.parse_int_ranges('22c8-22ca 25b7 27d5-27d7')
  # additionally requested NARS symbols
  apl_cps |= tool_utils.parse_int_ranges('00a7 03c0 221a 221e 2299')
  add_mirrored(apl_cps)
  # Android doesn't want MONO as a fallback, so no codepoint should be added
  # only to MONO and not to any other Noto font.
  cmap_ops.add_all_to_all(apl_cps, ['MONO', 'Zmth'])
def _assign_symbols_from_groups(cmap_ops):
  """Use 'group data' to assign various symbols to Zmth, Zsym, SYM2,
  MONO, MUSIC' based on character groups. This fine-tunes the block
  assignments (some related symbols are scattered across blocks,
  and symbols blocks are themselves mixed).

  Each data line is 'scripts ; name ; ranges'; a leading '-' on a script
  means remove the ranges from it instead of adding.
  """
  cmap_ops.phase('assign symbols from groups')
  with open('codepoint_groups.txt', 'r') as f:
    for lineix, line in enumerate(f):
      # strip comments and blank lines
      ix = line.find('#')
      if ix >= 0:
        line = line[:ix]
      line = line.strip()
      if not line:
        continue
      cols = [s.strip() for s in line.split(';')]
      # NOTE(review): a malformed line is reported but not skipped, so
      # fewer than 3 columns would raise IndexError below.
      if not len(cols) == 3:
        print ('incorrect cols on line %d "%s"' % (lineix, line))
      if cols[0] == '':
        # no assignments for this line
        continue
      add, remove = [], []
      for s in cols[0].split():
        if s.startswith('-'):
          remove.append(s[1:])
        else:
          add.append(s)
      name = cols[1]
      # We use parens to delimit parts of the ranges that are 'for
      # reference' but should not impact codepoint assignment.
      # since parse_int_ranges doesn't understand these, strip
      # out the parenthesized sections. These don't nest but we
      # don't check for this, only that open ranges are closed.
      ranges = cols[2]
      parts = None
      ix = 0
      while ix < len(ranges):
        open_p = ranges.find('(', ix)
        if open_p < 0:
          if parts != None:
            parts.append(ranges[ix:].strip())
          break
        close_p = ranges.find(')', open_p+1)
        if close_p < 0:
          raise Exception(
              'unclosed paren in ranges on line %d "%s"' % (lineix, line))
        if parts == None:
          parts = []
        parts.append(ranges[ix:open_p])
        ix = close_p + 1
      if parts:
        ranges = ' '.join(parts)
      try:
        cps = tool_utils.parse_int_ranges(ranges)
      except Exception as err:
        print >> sys.stderr, err
        print >> sys.stderr, cols[2]
        print >> sys.stderr, 'problem on %d "%s"' % (lineix, line)
        raise err
      # A very large group is probably a data error; warn but proceed.
      if len(cps) > 50:
        print >> sys.stderr, 'large range (%d) on %d "%s"' % (
            len(cps), lineix, line)
      cmap_ops.log('group: %s (%d)' % (name, len(cps)))
      if add:
        cmap_ops.add_all_to_all(cps, add)
      if remove:
        cmap_ops.remove_all_from_all(cps, remove)
def _assign_mono(cmap_ops):
  """Populate MONO: everything in LGC, all of CP437, the variation
  selector used for the zero variant, and the Geometric Shapes block."""
  cmap_ops.phase('assign mono')
  cmap_ops.add_all(cmap_ops.script_chars('LGC'), 'MONO')
  cp437_chars = unicode_data.codeset('cp437')
  cmap_ops.phase('assign cp437 to mono')
  assert cp437_chars is not None
  cmap_ops.add_all(cp437_chars, 'MONO')
  # VARIATION SELECTOR-1, for the zero variant
  cmap_ops.add(0xfe00, 'MONO')
  # geometric shapes are scattered across scripts; pull in the whole block
  cmap_ops.add_all(_block_cps('Geometric Shapes'), 'MONO')
def _assign_sym2(cmap_ops):
  """Make sure SYM2 can render enclosing-keycap sequences (these glyphs
  used to be in B/W Emoji)."""
  cmap_ops.phase('assign sym2')
  keycap_spec = """
      0023 # Number Sign
      002A # Asterisk
      0030-0039 # Digits
      20E3 # Combining Enclosing Keycap"""
  cmap_ops.add_all(tool_utils.parse_int_ranges(keycap_spec), 'SYM2')
def _assign_math(cmap_ops):
  """No longer use STIX character set, we will just fallback for characters
  not in math. To this end, we remove any LGC characters except for ascii
  letters, since combining harpoons/arrows in math might apply to them."""
  cmap_ops.phase('assign math')
  # We keep this here for awhile for reference, but no longer use it.
  # NOTE(review): STIX_CPS is intentionally unused (dead local, kept as
  # documentation of the old STIX repertoire).
  STIX_CPS = tool_utils.parse_int_ranges(
      """
      0020-007e 00a0-0180 0188 0190 0192 0195 0199-019b 019e 01a0-01a1 01a5
      01aa-01ab 01ad 01af-01b0 01b5 01ba-01bb 01be 01c0-01c3 01f0 01fa-01ff
      0221 0234-0237 02b0-02e9 02ec-02ed 0300-033f 0346 034c 0359 035c
      0360-0362 037e 0384-038a 038c 038e-03a1 03a3-03ce 03d0-03d2 03d5-03d6
      03d8-03e1 03f0-03f1 03f4-03f6 0401-040c 040e-044f 0451-045c 045e-045f
      0462-0463 046a-046b 0472-0475 0490-0491 1d00 1d07 1d1c 1d84-1d85 1d8a
      1d8d-1d8e 1e80-1e85 1ef2-1ef3 2010-2022 2025-2026 2030-203c 203e 2040
      2043-2044 2047 204e-2052 2057 205f 207f 20a3-20a4 20a7 20ac 20d0-20d2
      20d6-20d7 20db-20df 20e1 20e4-20f0 2102 2105 2107 210a-2113 2115-211e
      2122 2124-2129 212b-2138 213c-214b 2153-215e 2190-21ea 21f4-22ff 2302
      2305-2306 2308-2313 2315-231a 231c-2323 2329-232a 232c-232e 2332 2336
      233d 233f-2340 2353 2370 237c 2393-2394 239b-23b9 23ce 23d0 23dc-23e7
      2423 2460-2468 24b6-24ea 2500 2502 2506 2508 250a 250c 2510 2514 2518
      251c 2524 252c 2534 253c 2550-256c 2571-2572 2584 2588 258c 2590-2593
      25a1-25ff 2606 2609 260c 260e 2612 2621 2639-2644 2646-2649 2660-2667
      2669-266b 266d-266f 267e 2680-2689 26a0 26a5 26aa-26ac 26b2 2709 2713
      2720 272a 2736 273d 2772-2773 2780-2793 279b 27c1-27c9 27cc 27d0-27ef
      27f1-27ff 2901-2aff 2b13-2b41 2b43-2b4c 2b50-2b54 3030 fb00-fb04
      1d401-1d454 1d456-1d49c 1d49e-1d49f 1d4a2 1d4a5-1d4a6 1d4a9-1d4ac
      1d4ae-1d4b9 1d4bb 1d4bd-1d4c3 1d4c5-1d505 1d507-1d50a 1d50d-1d514
      1d516-1d51c 1d51e-1d539 1d53b-1d53e 1d540-1d544 1d546 1d54a-1d550
      1d552-1d6a5 1d6a8-1d7c9 1d7ce-1d7ff
      """)
  # Assume fallback will work for these in general
  cmap_ops.remove_all(cmap_ops.script_chars('LGC'), 'Zmth')
  cmap_ops.remove_all(cmap_ops.script_chars('SYM2'), 'Zmth')
  # Add ASCII letters (A-Z, a-z)
  alphanum = tool_utils.parse_int_ranges('0041-005a 0061-007a')
  cmap_ops.add_all(alphanum, 'Zmth')
  # Add back blocks that get split up too arbitrarily
  cmap_ops.add_all(_block_cps('Mathematical Operators'), 'Zmth')
  cmap_ops.add_all(_block_cps('Miscellaneous Mathematical Symbols-B'), 'Zmth')
def _remove_unwanted(cmap_ops):
  """Remove characters we know we don't want in any font.

  Removed characters are recorded under the EXCL pseudo-script so the
  exclusions remain visible in the output data.
  """
  # Chars we never want.
  unwanted_chars = tool_utils.parse_int_ranges("""
      0000-001f # C0 controls
      007F # DEL
      0080-009f # C1 controls
      FEFF # BOM""")
  # Chars we don't want, but perhaps a bit more provisionally than the
  # above.
  excluded_chars = tool_utils.parse_int_ranges("""
      332c # Jungshik says excluded on purpose
      fa70-fad9 # Jungshik says Ken regards DPRK compatibility chars as
      # outside of scope, like most of plane 2.
      1b000-1b001 # Ken says these are controversial.""")
  cmap_ops.phase('remove unwanted')
  cmap_ops.remove_all_from_all(unwanted_chars, cmap_ops.all_scripts())
  cmap_ops.add_all(unwanted_chars, 'EXCL')
  cmap_ops.phase('remove excluded')
  cmap_ops.remove_all_from_all(excluded_chars, cmap_ops.all_scripts())
  cmap_ops.add_all(excluded_chars, 'EXCL')
def _assign_wanted(cmap_ops):
  """Re-add specific exceptions to particular fonts after the
  'never want' purge in _remove_unwanted."""
  cmap_ops.phase('assign wanted')
  wanted_chars = {
      'LGC': '20bf feff', # Bitcoin (not in Unicode 9 data yet), BOM
      'MONO': 'feff', # BOM
      'SYM2': '0000-001f 007f 0080-009f', # show as question mark char
      'Zsye': 'fe4e5-fe4ee fe82c fe82e-fe837', # legacy PUA for android
  }
  for script, ranges in sorted(wanted_chars.items()):
    cmap_ops.add_all(tool_utils.parse_int_ranges(ranges), script)
def _assign_basic(cmap_ops):
  """Give every script except EXCL the basics: NUL, CR, Space, NBS."""
  cmap_ops.phase('assign basic')
  targets = set(cmap_ops.all_scripts())
  targets.discard('EXCL')
  cmap_ops.add_all_to_all(frozenset([0x0, 0x0D, 0x20, 0xA0]), targets)
def build_script_to_chars(log_level):
  """Run the full phase-3 cmap assignment pipeline.

  log_level 0 disables logging, 1 logs events, >1 also logs details.
  Returns the resulting script -> chars map.  The pipeline steps below
  are order-dependent; see the trailing comments on individual calls.
  """
  if log_level == 0:
    log_events = False
    log_details = False
  else:
    log_events = True
    log_details = log_level > 1
  script_to_chars = unicode_data.create_script_to_chars()
  # Bitcoin is not in our unicode 9 data yet, allow it to be set anyway.
  temp_defined = set([0x20bf])
  cmap_ops = CmapOps(
      script_to_chars, log_events=log_events, log_details=log_details,
      undefined_exceptions=temp_defined)
  _unassign_inherited_and_common_with_extensions(cmap_ops)
  _reassign_inherited(cmap_ops)
  _reassign_common(cmap_ops)
  _unassign_latin(cmap_ops)
  _assign_cldr_punct(cmap_ops)
  _reassign_merged_scripts(cmap_ops)
  _reassign_common_by_block(cmap_ops)
  _reassign_by_block(cmap_ops)
  _remove_empty(cmap_ops)
  _reassign_symbols(cmap_ops)
  _reassign_emoji(cmap_ops)
  _assign_nastaliq(cmap_ops)
  _assign_complex_script_extra(cmap_ops)
  _assign_hyphens_for_autohyphenation(cmap_ops)
  _assign_script_required(cmap_ops)
  _assign_script_special_chars(cmap_ops)
  _assign_legacy_phase2(cmap_ops)
  _assign_bidi_mirroring(cmap_ops)
  _unassign_lgc_from_symbols(cmap_ops)
  _assign_programming_lang_symbols(cmap_ops)
  _assign_symbols_from_groups(cmap_ops)
  _assign_mono(cmap_ops) # after LGC is defined except for basics
  _assign_sym2(cmap_ops) # after LGC removed, add back for enclosing keycaps
  _assign_math(cmap_ops)
  _remove_unwanted(cmap_ops) # comes before assign_basic, assign_wanted
  _assign_wanted(cmap_ops)
  _assign_basic(cmap_ops)
  cmap_ops.finish() # so we can clean up log
  return cmap_ops.create_script_to_chars()
def _merge_fallback_chars(script_to_chars, srcfile):
  """Merge fallback ('x') ranges from srcfile into the cmap data.

  Returns a map from script to a (cmap, xcmap) tuple, where xcmap holds
  the fallback chars removed from cmap, or None when srcfile has no
  fallback data for that script.  Note: the char sets in script_to_chars
  are modified in place (cmap -= xcmap).
  """
  xtra_cmap_data = cmap_data.read_cmap_data_file(srcfile)
  xtra_rowdata = cmap_data.create_map_from_table(xtra_cmap_data.table)
  merged_cmap = {}
  for script in sorted(script_to_chars):
    cmap = script_to_chars[script]
    xcmap = None
    if script in xtra_rowdata:
      rowdata = xtra_rowdata[script]
      # Rows without an xcount attribute carry no fallback data.
      xcount = int(getattr(rowdata, 'xcount', -1))
      if xcount != -1:
        xcmap = tool_utils.parse_int_ranges(rowdata.xranges)
        cmap -= xcmap
      else:
        xcmap = None # not a tuple, so probably no fallback data
    else:
      print >> sys.stderr, 'no script %s found in %s' % (script, srcfile)
    merged_cmap[script] = (cmap, xcmap)
  return merged_cmap
def _get_cmap_data(script_to_chars, metadata):
  """Wrap the script-to-chars map and metadata in a CmapData object."""
  return cmap_data.CmapData(
      metadata, cmap_data.create_table_from_map(script_to_chars))
### debug
def _dump_primaries():
  """Debug helper: print each unicode block's range, primary script
  (or dashes when it has none), and name."""
  for block in unicode_data.block_names():
    block_range = unicode_data.block_range(block)
    primary_script = _primary_script_for_block(block)
    print '%13s %6s %s' % (
        '%04X-%04X' % block_range,
        '\'%s\'' % primary_script if primary_script else '------',
        block)
def main():
  """Command-line driver: build the phase 3 cmap data and write or
  print it, or regenerate the _SCRIPT_REQUIRED source with --regen."""
  DEFAULT_OUTFILE = 'noto_cmap_phase3_temp.xml'
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-o', '--outfile', help='name of cmap file to output ("%s" if name '
      'omitted)' % DEFAULT_OUTFILE, metavar='file', nargs='?', default=None,
      const=DEFAULT_OUTFILE)
  parser.add_argument(
      '-m', '--merge', help='merge excluded/fallback data from file',
      metavar='file')
  parser.add_argument(
      '-l', '--loglevel', help='log detail 0-2',
      metavar='level', nargs='?', type=int, const=1, default=0)
  parser.add_argument(
      '--regen', help='reformat script required data, no cmap generation',
      action='store_true')
  args = parser.parse_args()
  if args.regen:
    _regen_script_required()
    return
  script_to_chars = build_script_to_chars(args.loglevel)
  meta_params = []
  if args.merge:
    script_to_chars = _merge_fallback_chars(script_to_chars, args.merge)
    meta_params.append(('mergefile', args.merge))
  metadata = cmap_data.create_metadata('noto_cmap_reqs', meta_params)
  cmapdata = _get_cmap_data(script_to_chars, metadata)
  # Write to a file when -o was given, otherwise dump to stdout.
  if args.outfile:
    cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
    print 'wrote %s' % args.outfile
  else:
    print cmap_data.write_cmap_data(cmapdata, pretty=True)
# Command-line entry point; see main() for the supported flags.
if __name__ == "__main__":
  main()
|
anthrotype/nototools
|
nototools/noto_cmap_reqs.py
|
Python
|
apache-2.0
| 90,248
|
[
"FEFF",
"FLEUR"
] |
f2f6bd6e3221bc983a27284d5ff0c56bba052bee603dcc97f04513e91e58c298
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.lb
from itertools import product
@utx.skipIfMissingFeatures(["EXTERNAL_FORCES"])
class LBSwitchActor(ut.TestCase):

    """Test swapping lattice-Boltzmann fluid actors.

    Removes one LB fluid and adds a second one, checking that the
    particle coupling force follows the active fluid's friction
    constant and that the replacement fluid starts from rest.
    """

    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    system.time_step = 0.01
    system.cell_system.skin = 0.1

    def switch_test(self, GPU=False):
        """Run the switch scenario with CPU (default) or GPU LB fluids."""
        system = self.system
        system.actors.clear()
        # One fixed particle moving against the (initially resting) fluid.
        system.part.add(pos=[1., 1., 1.], v=[1., 0, 0], fix=[1, 1, 1])
        lb_fluid_params = {'agrid': 2.0, 'dens': 1.0, 'visc': 1.0, 'tau': 0.03}
        friction_1 = 1.5
        friction_2 = 4.0
        if GPU:
            lb_fluid_1 = espressomd.lb.LBFluidGPU(**lb_fluid_params)
            lb_fluid_2 = espressomd.lb.LBFluidGPU(**lb_fluid_params)
        else:
            lb_fluid_1 = espressomd.lb.LBFluid(**lb_fluid_params)
            lb_fluid_2 = espressomd.lb.LBFluid(**lb_fluid_params)
        system.actors.add(lb_fluid_1)
        system.thermostat.set_lb(LB_fluid=lb_fluid_1, gamma=friction_1)
        system.integrator.run(1)
        # With the particle fixed at velocity v against a resting fluid,
        # the coupling force is the pure friction term -gamma * v.
        force_on_part = -friction_1 * np.copy(system.part[0].v)
        np.testing.assert_allclose(np.copy(system.part[0].f), force_on_part)
        system.integrator.run(100)
        # Momentum transferred to the fluid must show up as flow.
        self.assertNotAlmostEqual(lb_fluid_1[3, 3, 3].velocity[0], 0.0)
        system.actors.remove(lb_fluid_1)
        system.part[0].v = [1, 0, 0]
        system.integrator.run(0)
        # Without an LB actor there is no coupling force.
        np.testing.assert_allclose(np.copy(system.part[0].f), 0.0)
        system.actors.add(lb_fluid_2)
        system.thermostat.set_lb(LB_fluid=lb_fluid_2, gamma=friction_2)
        # The new fluid must not inherit any flow from the removed one.
        for p in product(range(5), range(5), range(5)):
            np.testing.assert_allclose(
                np.copy(lb_fluid_2[p].velocity), np.zeros((3,)))
        system.part[0].v = [1, 0, 0]
        system.integrator.run(1)
        np.testing.assert_allclose(
            np.copy(system.part[0].f), [-friction_2, 0.0, 0.0])

    def test_CPU_LB(self):
        self.switch_test()

    @utx.skipIfMissingGPU()
    def test_GPU_LB(self):
        self.switch_test(GPU=True)
# Run the test case directly with unittest when invoked as a script.
if __name__ == "__main__":
    ut.main()
|
mkuron/espresso
|
testsuite/python/lb_switch.py
|
Python
|
gpl-3.0
| 2,916
|
[
"ESPResSo"
] |
1fd0e4407997e82d6c97702a3bb05b1ed436c1a6a8af120edc2a78305fb28dcc
|
# Copyright 2012-2013 Dusty Phillips
# This file is part of gitifyhg.
# gitifyhg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitifyhg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitifyhg. If not, see <http://www.gnu.org/licenses/>.
# Some of this code comes from https://github.com/felipec/git/tree/fc/remote/hg
# but much of it has been rewritten.
from mercurial.context import memctx, memfilectx
from mercurial import encoding, extensions
from mercurial.error import Abort
from mercurial.node import hex as hghex # What idiot overrode a builtin?
from mercurial.node import short as hgshort
from mercurial.bookmarks import pushbookmark
from mercurial.scmutil import revsingle
from mercurial.util import version as hg_version
from distutils.version import StrictVersion
from .util import (die, output, git_to_hg_spaces, hgmode, branch_tip,
ref_to_name_reftype, BRANCH, BOOKMARK, TAG, user_config)
class dummyui(object):
    """Minimal stand-in for a mercurial ui object; discards debug output."""
    def debug(self, msg):
        # Intentionally ignore all debug messages.
        pass
# Pick a strip_revs implementation matching the installed mercurial:
# from 2.8 the 'strip' extension is used, older versions go through
# the mq extension's strip.
if StrictVersion(hg_version()) >= StrictVersion('2.8'):
    stripext = extensions.load(dummyui(), 'strip', '')
    def strip_revs(repo, processed_nodes):
        # Remove the given changesets (and descendants) from the repo.
        stripext.strip(dummyui(), repo, processed_nodes)
else:
    def strip_revs(repo, processed_nodes):
        # Remove the given changesets (and descendants) from the repo.
        repo.mq.strip(repo, processed_nodes)
class GitExporter(object):
'''A processor when the remote receives a git-remote `export` command.
Provides export information to push commits from git to the mercurial
repository.'''
NULL_PARENT = '\0' * 20
def __init__(self, hgremote, parser):
self.hgremote = hgremote
self.marks = self.hgremote.marks
self.parsed_refs = self.hgremote.parsed_refs
self.parsed_tags = {} # refs to tuple of (message, author)
self.blob_marks = self.hgremote.blob_marks
self.repo = self.hgremote.repo
self.parser = parser
self.processed_marks = set()
self.processed_nodes = []
self.hgrc = user_config()
def process(self):
self.marks.store() # checkpoint
new_branch = False
push_bookmarks = []
self.parser.read_line()
for line in self.parser.read_block('done'):
command = line.split()[0]
if command not in ('blob', 'commit', 'reset', 'tag', 'feature'):
die('unhandled command: %s' % line)
getattr(self, 'do_%s' % command)()
updated_refs = {}
for ref, node in self.parsed_refs.iteritems():
if ref.startswith(self.hgremote.prefix):
# This seems to be a git fast-export bug
continue
name, reftype = ref_to_name_reftype(ref)
name = git_to_hg_spaces(name)
if reftype == BRANCH:
if name not in self.hgremote.branches:
new_branch = True
elif reftype == BOOKMARK:
old = self.hgremote.bookmarks.get(name)
old = old.hex() if old else ''
if not pushbookmark(self.repo, name, old, node):
continue
push_bookmarks.append((name, old, hghex(node)))
elif reftype == TAG:
self.write_tag(name, node)
else:
assert False, "unexpected reftype: %s" % reftype
updated_refs[ref] = node
success = False
try:
self.repo.push(self.hgremote.peer, force=False, newbranch=new_branch)
for bookmark, old, new in push_bookmarks:
self.hgremote.peer.pushkey('bookmarks', bookmark, old, new)
self.marks.store()
success = True
except Abort as e:
# mercurial.error.Abort: push creates new remote head f14531ca4e2d!
if e.message.startswith("push creates new remote head"):
self.marks.load() # restore from checkpoint
# strip revs, implementation finds min revision from list
if self.processed_nodes:
strip_revs(self.repo, self.processed_nodes)
else:
die("unknown hg exception: %s" % e)
# TODO: handle network/other errors?
for ref, node in updated_refs.items():
if success:
status = ""
name, reftype = ref_to_name_reftype(ref)
gitify_ref = self.hgremote.make_gitify_ref(name, reftype)
last_known_rev = self.marks.tips.get(gitify_ref)
new_rev = self.repo[node].rev()
if last_known_rev is not None and last_known_rev == new_rev:
# up to date status tells git that nothing has changed
# during the push for this ref, which prevents it from
# printing pointless status info to the user such as:
# * [new branch] master -> master
status = " up to date"
output("ok %s%s" % (ref, status))
else:
output("error %s non-fast forward" % ref) # TODO: other errors as well
output()
if not success:
# wait until fast-export finishes to muck with the marks file
self.remove_processed_git_marks()
def remove_processed_git_marks(self):
with self.hgremote.marks_git_path.open() as fread:
with self.hgremote.marks_git_path.open('r+') as fwrite:
for line in fread:
if not line.startswith(':'):
die("invalid line in marks-git: " + line)
mark = line[1:].split()[0]
if mark not in self.processed_marks:
fwrite.write(line)
fwrite.truncate()
def do_blob(self):
mark = self.parser.read_mark()
self.blob_marks[mark] = self.parser.read_data()
self.parser.read_line()
def do_reset(self):
ref = self.parser.line.split()[1]
# If the next line is a commit, allow it to process normally
if not self.parser.peek().startswith('from'):
return
from_mark = self.parser.read_mark()
from_revision = self.marks.mark_to_revision(from_mark)
self.parsed_refs[ref] = from_revision
# skip a line
self.parser.read_line()
def do_commit(self):
files = {}
extra = {}
from_mark = merge_mark = None
ref = self.parser.line.split()[1]
commit_mark = self.parser.read_mark()
author = self.parser.read_author()
committer = self.parser.read_author()
data = self.parser.read_data()
if self.parser.peek().startswith('from'):
from_mark = self.parser.read_mark()
if self.parser.peek().startswith('merge'):
merge_mark = self.parser.read_mark()
if self.parser.peek().startswith('merge'):
die('Octopus merges are not yet supported')
self.parser.read_line()
for line in self.parser.read_block(''):
if line.startswith('M'):
t, mode, mark_ref, path = line.split(' ', 3)
mark = int(mark_ref[1:])
filespec = {'mode': hgmode(mode), 'data': self.blob_marks[mark]}
elif line.startswith('D'):
t, path = line.split(' ', 1)
filespec = {'deleted': True}
if path[0] == '"' and path[-1] == '"':
path = path.decode('string-escape')[1:-1]
files[path] = filespec
user, date, tz = author
if committer != author:
extra['committer'] = "%s %u %u" % committer
if from_mark:
parent_from = self.marks.mark_to_revision(from_mark)
else:
parent_from = self.NULL_PARENT
if merge_mark:
parent_merge = self.marks.mark_to_revision(merge_mark)
else:
parent_merge = self.NULL_PARENT
# hg needs to know about files that changed from either parent
# whereas git only cares if it changed from the first parent.
if merge_mark:
for file in self.repo[parent_from].files():
if file not in files and file in\
self.repo[parent_from].manifest():
files[file] = {'ctx': self.repo[parent_from][file]}
name, reftype = ref_to_name_reftype(ref)
if reftype == BRANCH:
extra['branch'] = git_to_hg_spaces(name)
def get_filectx(repo, memctx, file):
filespec = files[file]
if 'deleted' in filespec:
raise IOError
if 'ctx' in filespec:
return filespec['ctx']
is_exec = filespec['mode'] == 'x'
is_link = filespec['mode'] == 'l'
rename = filespec.get('rename', None)
return memfilectx(file, filespec['data'],
is_link, is_exec, rename)
ctx = memctx(self.repo, (parent_from, parent_merge), data,
files.keys(), get_filectx, user, (date, tz), extra)
tmp = encoding.encoding
encoding.encoding = 'utf-8'
node = self.repo.commitctx(ctx)
encoding.encoding = tmp
self.parsed_refs[ref] = node
self.marks.new_mark(node, commit_mark)
self.processed_marks.add(str(commit_mark))
self.processed_nodes.append(node)
def do_tag(self):
name = self.parser.line.split()[1]
self.parser.read_mark()
tagger = self.parser.read_author()
message = self.parser.read_data()
self.parser.read_line()
self.parsed_tags[git_to_hg_spaces(name)] = tagger, message
    def do_feature(self):
        """Handle a git-fast-import 'feature' command (deliberately a no-op)."""
        pass # Ignore
    def write_tag(self, name, node):
        """Commit a new line to .hgtags on the branch of `node`, tagging it.

        Uses tagger/message recorded by do_tag() when available, otherwise
        synthesizes a default message and uses the configured username.
        """
        branch = self.repo[node].branch()
        # Calling self.repo.tag() doesn't append the tag to the correct
        # commit. So I copied some of localrepo._tag into here.
        # But that method, like much of mercurial's code, is ugly.
        # So I then rewrote it.
        # Start from the current .hgtags content at the branch tip (if any).
        tags_revision = revsingle(self.repo, hghex(branch_tip(self.repo, branch)))
        if '.hgtags' in tags_revision:
            old_tags = tags_revision['.hgtags'].data()
        else:
            old_tags = ''
        newtags = [old_tags]
        # Ensure the existing content ends with a newline before appending.
        if old_tags and old_tags[-1] != '\n':
            newtags.append('\n')
        encoded_tag = encoding.fromlocal(name)
        tag_line = '%s %s' % (hghex(node), encoded_tag)
        if tag_line in old_tags:
            return # Don't commit a tag that was previously committed
        newtags.append(tag_line)
        # Callback handed to memctx: serves the new .hgtags file content.
        def get_filectx(repo, memctx, file):
            return memfilectx(file, ''.join(newtags))
        if name in self.parsed_tags:
            author, message = self.parsed_tags[name]
            user, date, tz = author
            date_tz = (date, tz)
        else:
            message = "Added tag %s for changeset %s" % (name, hgshort(node))
            user = self.hgrc.get("ui", "username", None)
            date_tz = None # XXX insert current date here
        # In-memory commit touching only .hgtags, parented on the branch tip.
        ctx = memctx(self.repo,
            (branch_tip(self.repo, branch), self.NULL_PARENT), message,
            ['.hgtags'], get_filectx, user, date_tz, {'branch': branch})
        # Temporarily force utf-8 so the commit metadata round-trips cleanly.
        tmp = encoding.encoding
        encoding.encoding = 'utf-8'
        node = self.repo.commitctx(ctx)
        encoding.encoding = tmp
|
kevinrodbe/gitifyhg
|
gitifyhg/gitexporter.py
|
Python
|
gpl-3.0
| 11,827
|
[
"Octopus"
] |
3a5721839335935105a9159268958fb9b35dbdde049a055af0e1580a72579ec1
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(1,'..') # allow parent modules to be imported
sys.path.insert(1,'../..') # allow parent modules to be imported
sys.path.insert(1,'../../..') # allow parent modules to be imported
import time
import params
from misc.utils import generate_OUinput, x_filter, get_changing_input, interpolate_input
import models.brian2.network_sim as net
import models.fp.fokker_planck_model as fp
import models.ln_exp.ln_exp_model as lnexp
import models.ln_dos.ln_dos_model as lndos
import models.ln_bexdos.ln_bexdos_model as lnbexdos
import models.spec1.spec1_model as s1
import models.spec2.spec2_model as s2
import models.spec2_red.spec2_red_model as s2_red
# use the following in IPython for qt plots: %matplotlib qt
# what will be computed
# network simulation
run_network = True
# full fokker planck model
run_fp = True
# reduced models
# ln cascade
run_ln_exp = True
run_ln_dos= True
run_ln_bexdos = False
# spectral
run_spec1 = True
run_spec2 = True
run_spec2_red = True
# use as default the parameters from file params.py
# if not specified else below
# NOTE: this rebinds the imported `params` module name to the parameter dict;
# the module itself is not reachable after this line (intentional but fragile).
params = params.get_params()
# runtime options
# run simulation of uncoupled (rec=False) or recurrently coupled simulation (rec=True)
rec = True
params['runtime'] = 3000.
# number of neurons
params['N_total'] = 4000 #50000
# time steps for models
params['uni_dt'] = 0.01 # [ms]
params['fp_dt'] = 0.05
params['net_dt'] = 0.05
# coupling (and delay) params in the case of recurrency, i.e. rec = True
params['K'] = 100
params['J'] = 0.05
params['delay_type'] = 2
params['taud'] = 3.
params['const_delay'] = 5.
# adaptation params as scalars
params['a'] = 4.
params['b'] = 40.
# [only for reduced models] switch between two different time integration schemes: (1) Euler, (2) Heun
params['uni_int_order'] = 2
# for generating the input; for all models which do
# not have the same resolution we have to interpolate
params['min_dt'] = min(params['uni_dt'], params['net_dt'],params['fp_dt'])
# precomputed quantities (hdf5) used by the cascade / spectral reduced models
ln_data = 'quantities_cascade.h5'
spec_data = 'quantities_spectral.h5'
params['t_ref'] = 0.0
# plotting section
plot_rates = True
plot_input = True
# only plot adaptation if the adaptation mechanism is actually switched on
plot_adapt = True and (params['a'] > 0 or params['b'] > 0)
# external input mean
# for the external input mean and the standard deviation any type of input may be defined, such as constant, step, ramp
input_mean = 'steps' # similar to Fig1 of manuscript
# input_mean = 'osc'
# input_mean = 'const'
# input_mean = 'OU'
# input_mean = 'ramp'
# filter input mean (necessary for spectral_2m model)
filter_mean = True
#input_std = 'const'
#input_std = 'step'
#input_std = 'OU'
input_std = 'ramp'
filter_std = True
# external time trace used for generating input and plotting
# if time step is unequal to model_dt input gets interpolated for
# the respective model
steps = int(params['runtime']/params['min_dt'])
t_ext = np.linspace(0., params['runtime'], steps+1)
# time trace computed with min_dt
params['t_ext'] = t_ext
# for filter testing set seed
# np.random.seed(3)
# mu_ext variants
# Build the external mean input mu_ext on the common time grid t_ext
# (resolution min_dt); coarser models interpolate this trace later.
if input_mean == 'const':
    mu_ext = np.ones(steps+1) * 4.0
# mu = OU process, sigma = const
elif input_mean == 'OU':
    params['ou_X0'] = 0.
    params['ou_mean'] = 6.0
    params['ou_sigma'] = .5
    params['ou_tau'] = 50.
    mu_ext = generate_OUinput(params)
# oscillating input
elif input_mean == 'osc':
    freq = 0.005 #kHz
    amp = 0.1 #mV/ms
    offset = 0.5 #mV/ms
    mu_ext = offset*np.ones(len(t_ext)) + amp*np.sin(2*np.pi*freq*t_ext)
# input is ramped over a certain time interval from mu_start to mu_end
elif input_mean == 'ramp':
    # define parameters for input
    ramp_start = 500.
    assert ramp_start < params['runtime']
    ramp_duration = 30.
    mu_start = 2.
    mu_end = 4.
    mu_ext = get_changing_input(params['runtime'],
                                ramp_start, params['min_dt'], mu_start,
                                mu_end, duration_change=ramp_duration)
# step input scenario for mean input
elif input_mean == 'steps':
    # plateau values for the consecutive steps
    vals = [1, 1, 1, 1, 1, 1.7,
            1.3, 2.7, 2.4, 3.5,
            3, 3.4, 4.1, 3.7, 3.5,
            2.5, 3, 3.5, 2, 2.5]
    params['vals'] = vals
    params['duration_vals'] = 150.

    def step_plateaus_up_down(params):
        """Return a piecewise-constant trace that holds each entry of
        params['vals'] for params['duration_vals'] ms on the min_dt grid.

        The plateaus must exactly tile the whole runtime (asserted below).
        """
        steps = int(params['runtime']/params['min_dt'])
        trace = np.zeros(steps+1)
        val_idx = int(params['duration_vals']/params['min_dt'])
        assert params['runtime'] % params['duration_vals'] == 0
        assert len(params['vals'])*params['duration_vals'] == params['runtime']
        # BUGFIX: xrange -> range for Python 3 compatibility (the file
        # already imports print_function); also read vals from params
        # consistently instead of mixing closure and dict access.
        for i in range(len(params['vals'])):
            trace[i*val_idx:i*val_idx+val_idx] = params['vals'][i]
        return trace

    mu_ext = step_plateaus_up_down(params)
else:
    # consistent with the sigma_ext branch below: fail loudly on an
    # unknown scenario instead of a later NameError on mu_ext
    raise NotImplementedError
# sigma_ext variants
# Build the external noise-intensity trace sigma_ext on the same grid.
if input_std == 'const':
    sigma_ext = np.ones(steps+1) * 2.
elif input_std == 'step':
    # three plateaus: 4.0 -> 3.0 -> 1.5, each covering a third of the run
    sigma_ext = np.ones(steps+1)* 4.0
    sigma_ext[int(steps/3):int(2*steps/3)] = 3.0
    sigma_ext[int(2*steps/3):] = 1.5
# mu = const, sigma = OU process
elif input_std == 'OU':
    params['ou_X0'] = 0. #only relevant if params['ou_stationary'] = False
    params['ou_mean'] = 3.0
    params['ou_sigma'] = 1.2
    params['ou_tau'] = 1.
    sigma_ext = generate_OUinput(params)
elif input_std == 'ramp':
    # define parameters for input
    ramp_start = 1500.
    assert ramp_start < params['runtime']
    ramp_duration = 100.
    sigma_start = 3.5
    sigma_end = 1.5
    sigma_ext = get_changing_input(params['runtime'],ramp_start, params['min_dt'],sigma_start,
                                   sigma_end,duration_change=ramp_duration)
else:
    raise NotImplementedError
# enforce in any case sufficiently large input
# values outside [min, max] are mirrored back inside the bound
# (v -> 2*bound - v) rather than hard-clipped
mu_min = -1.0
mu_ext[mu_ext < mu_min] = mu_min - (mu_ext[mu_ext < mu_min] - mu_min)
mu_max = 5.
mu_ext[mu_ext > mu_max] = mu_max - (mu_ext[mu_ext > mu_max] - mu_max)
sigma_min = 0.5
sigma_ext[sigma_ext < sigma_min] = sigma_min - (sigma_ext[sigma_ext < sigma_min] - sigma_min)
sigma_max = 5.
sigma_ext[sigma_ext > sigma_max] = sigma_max - (sigma_ext[sigma_ext > sigma_max] - sigma_max)
# filter the input in order to have not sharp edges
# filter params
params['filter_type'] = 'gauss'
# filter width in time domain ~ 6*filter_gauss_sigma
# -> keep that in mind for resolution issues
params['filter_gauss_sigma'] = 1. #1 for ramps, 0.1-0.5 for OU
# keep the unfiltered traces around (plotted later for comparison);
# note mu_ext_orig / sigma_ext_orig only exist if the filter flag is set
if filter_mean:
    mu_ext_orig = mu_ext
    mu_ext = x_filter(mu_ext_orig, params)
if filter_std:
    sigma_ext_orig = sigma_ext
    sigma_ext = x_filter(sigma_ext_orig, params)
# collect ext input for model wrappers
ext_input0 = [mu_ext, sigma_ext]
# saving results in global results dict
results = dict()
results['input_mean'] = mu_ext
results['input_sigma']= sigma_ext
results['model_results'] = dict()
print('\nModels run in {} mode.\n'.format('recurrent' if rec else 'feedforward'))
# Each enabled model gets the external input interpolated onto its own time
# grid and stores its output dict under results['model_results'][<name>].
# brian network sim
if run_network:
    ext_input = interpolate_input(ext_input0,params,'net')
    results['model_results']['net'] = \
        net.network_sim(ext_input, params, rec = rec)
#fokker planck equation solved using the Scharfetter-Gummel-flux approximation
if run_fp:
    ext_input = interpolate_input(ext_input0, params, 'fp')
    results['model_results']['fp'] = \
        fp.sim_fp_sg(ext_input, params, rec=rec)
#reduced models
# models based on a linear-nonlinear cascade (ln_exp, ln_dos, ln_bexdos)
if run_ln_exp:
    ext_input = interpolate_input(ext_input0, params, 'reduced')
    results['model_results']['ln_exp'] = \
        lnexp.run_ln_exp(ext_input, params, ln_data,
                         rec_vars= params['rec_lne'], rec= rec)
if run_ln_dos:
    ext_input = interpolate_input(ext_input0, params, 'reduced')
    results['model_results']['ln_dos'] = \
        lndos.run_ln_dos(ext_input, params,ln_data,
                         rec_vars= params['rec_lnd'],
                         rec= rec)
if run_ln_bexdos:
    ext_input = interpolate_input(ext_input0, params, 'reduced')
    results['model_results']['ln_bexdos'] = \
        lnbexdos.run_ln_bexdos(ext_input, params,ln_data,
                               rec_vars=['wm'], rec = rec)
# models based on a spectral decomposition of the Fokker-Planck operator
if run_spec1:
    ext_input = interpolate_input(ext_input0, params, 'reduced')
    results['model_results']['spec1'] = \
        s1.run_spec1(ext_input, params, spec_data,
                     rec_vars=params['rec_s1'],
                     rec = rec)
if run_spec2:
    ext_input = interpolate_input(ext_input0, params, 'reduced')
    results['model_results']['spec2'] = \
        s2.run_spec2(ext_input, params, spec_data,
                     rec_vars=['wm'],
                     rec=rec)
if run_spec2_red:
    ext_input = interpolate_input(ext_input0, params, 'reduced')
    results['model_results']['spec2_red'] = \
        s2_red.run_spec2_red(ext_input, params, rec_vars=params['rec_sm'],
                             rec=rec, filename_h5 = spec_data)
# plotting section
# one stacked subplot per enabled panel (input / rates / adaptation)
nr_p = plot_rates + plot_adapt + plot_input
fig = plt.figure(); pidx = 1
# plot inputs
if plot_input:
    ax_mu = fig.add_subplot(nr_p, 1, pidx)
    # unfiltered trace only exists (and is only drawn) when filtering is on
    plt.plot(t_ext, mu_ext_orig, color = 'k', lw=1.5) if filter_mean else 0
    line_mu_final = plt.plot(t_ext, ext_input0[0], color = 'm', lw=1.5, label='$\mu_\mathrm{final}$')
    plt.ylabel('$\mu_{ext}$ [mV/ms]', fontsize=15)
    # second y-axis for sigma on the same panel
    ax_sig = plt.twinx()
    plt.plot(t_ext, sigma_ext_orig, color = 'g', lw=1.5) if filter_std else 0
    line_sig_final = plt.plot(t_ext, ext_input0[1], color = 'b', lw=1.5, label='$\sigma_\mathrm{final}$')
    plt.ylabel('$\sigma_{ext}$ [$\sqrt{mV}$/ms]', fontsize=15)
    plt.legend([line_mu_final[0], line_sig_final[0]],
               [line_mu_final[0].get_label(), line_sig_final[0].get_label()])
    pidx +=1
# plot rates
if plot_rates:
    # NOTE(review): sharex=ax_mu assumes plot_input is True — confirm
    ax_rate = fig.add_subplot(nr_p, 1, pidx, sharex=ax_mu)
    for model in results['model_results']:
        color = params['color'][model]
        lw = params['lw'][model]
        time = results['model_results'][model]['t']
        rates = results['model_results'][model]['r']
        plt.plot(time, rates, label = model, color = color, lw=lw)
    plt.ylabel('r [Hz]')
    plt.legend()
    pidx += 1
# plot adaptation current
if plot_adapt:
    ax_adapt = fig.add_subplot(nr_p, 1, pidx, sharex=ax_mu)
    plt.ylabel('<wm> [pA]')
    for model in results['model_results']:
        color = params['color'][model]
        lw = params['lw'][model]
        time = results['model_results'][model]['t']
        wm = results['model_results'][model]['wm']
        plt.plot(time, wm, color = color, lw = lw)
    # plot also the mean+/-std band if the network sim was computed
    # BUGFIX: the network results are stored under results['model_results']
    # (see the run section above), not directly in results, so the old check
    # `'net' in results` was always False and the band was never drawn.
    if 'net' in results['model_results']:
        time = results['model_results']['net']['t']
        wm = results['model_results']['net']['wm']
        w_std = results['model_results']['net']['w_std']
        plt.fill_between(time, wm - w_std, wm + w_std, color = 'lightpink')
if nr_p: plt.show()
|
methods-for-neuronal-network-dynamics/fokker-planck-based-spike-rate-models
|
adex_comparison/runmodels.py
|
Python
|
gpl-3.0
| 11,126
|
[
"Brian"
] |
1706e07b6c6cc91d07d47b30a092a63287dfcba42555a4ce284d5dcb358688f2
|
## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
## last $Author$
## last $Date$
## $Revision$
"""
Clean PDB-files so that they can be used for MD. This module is a
(still partial) re-implementation of the vintage pdb2xplor script.
"""
import Biskit.molUtils as MU
import Biskit.mathUtils as M
import Biskit.tools as t
from Biskit.PDBModel import PDBModel
from Biskit.LogFile import StdLog
import numpy.oldnumeric as N
import copy
class CleanerError( Exception ):
    """Base error raised by PDBCleaner operations."""
class CappingError( CleanerError ):
    """Raised when adding ACE/NME caps would mask a chain break."""
class PDBCleaner:
    """
    PDBCleaner performs the following tasks:
    * remove HETAtoms from PDB
    * replace non-standard AA by its closest standard AA
    * remove non-standard atoms from standard AA residues
    * delete atoms that follow missing atoms (in a chain)
    * remove multiple occupancy atoms (except the one with highest occupancy)
    * add ACE and NME capping residues to C- and N-terminals or chain breaks
    (see capTerminals(), this is NOT done automatically in process())
    Usage:
    =======
    >>> c = PDBCleaner( model )
    >>> c.process()
    >>> c.capTerminals( auto=True )
    This will modify the model in-place and report changes to STDOUT.
    Alternatively, you can specify a log file instance for the output.
    PDBCleaner.process accepts several options to modify the processing.
    Capping
    =======
    Capping will add N-methyl groups to free C-terminal carboxy ends
    or Acetyl groups to free N-terminal Amines and will thus 'simulate' the
    continuation of the protein chain -- a common practice in order to
    prevent fake terminal charges. The automatic discovery of missing residues
    is guess work at best. The more conservative approach is to use,
    for example:
    >>> c.capTerminals( breaks=1, capC=[0], capN=[2] )
    In this case, only the chain break detection is used for automatic capping
    -- the last residue before a chain break is capped with NME and the first
    residue after the chain break is capped with ACE. Chain break detection
    relies on PDBModel.chainBreaks() (via PDBModel.chainIndex( breaks=1 )).
    The normal terminals to be capped are now specified explicitly. The first
    chain (not counting chain breaks) will receive a NME C-terminal cap and the
    third chain of the PDB will receive a N-terminal ACE cap.
    Note: Dictionaries with standard residues and atom content are defined
    in Biskit.molUtils. This is a duplicate effort with the new strategy
    to parse Amber prep files for very similar information
    (AmberResidueType, AmberResidueLibrary) and should change once we
    implement a real framework for better residue handling.
    """
    #: these atoms always occur at the tip of a chain or within a ring
    #: and, if missing, will not trigger the removal of other atoms
    #: (note: 'OE1' is listed twice; harmless since the list is only used
    #: for membership tests)
    TOLERATE_MISSING = ['O', 'CG2', 'CD1', 'CD2', 'OG1', 'OE1', 'NH1',
                        'OD1', 'OE1',
                        'H5T',"O5'", ]
    ## PDB with ACE capping residue
    F_ace_cap = t.dataRoot() + '/amber/leap/ace_cap.pdb'
    ## PDB with NME capping residue
    F_nme_cap = t.dataRoot() + '/amber/leap/nme_cap.pdb'
    def __init__( self, fpdb, log=None, verbose=True ):
        """
        @param fpdb: pdb file OR PDBModel instance
        @type fpdb: str OR Biskit.PDBModel
        @param log: Biskit.LogFile object (default: STDOUT)
        @type log: Biskit.LogFile
        @param verbose: log warnings and infos (default: True)
        @type verbose: bool
        """
        self.model = PDBModel( fpdb )
        # a falsy log (None) falls back to a STDOUT logger
        self.log = log or StdLog()
        self.verbose = verbose
    def logWrite( self, msg, force=1 ):
        """Append msg to the log; print to STDOUT if no log and force is set."""
        if self.log:
            self.log.add( msg )
        else:
            if force:
                print msg
    def remove_multi_occupancies( self ):
        """
        Keep only atoms with alternate A field (well, or no alternate).

        Alternate-A atoms are kept and their alternate flag is cleared;
        other alternates are removed unless they have full (1.0) occupancy.
        Operates in-place on self.model.
        """
        if self.verbose:
            self.logWrite( self.model.pdbCode +
                           ': Removing multiple occupancies of atoms ...')
        i = 0
        to_be_removed = []
        for a in self.model:
            if a['alternate']:
                # NOTE(review): bare except below swallows any per-atom
                # error (e.g. unparsable occupancy) after logging it
                try:
                    str_id = "%i %s %s %i" % (a['serial_number'], a['name'],
                                              a['residue_name'],
                                              a['residue_number'])
                    if a['alternate'].upper() == 'A':
                        # keep the A-alternate but clear the flag
                        a['alternate'] = ''
                    else:
                        if float( a['occupancy'] ) < 1.0:
                            to_be_removed += [ i ]
                            if self.verbose:
                                self.logWrite(
                                    'removing %s (%s %s)' %
                                    (str_id,a['alternate'], a['occupancy']))
                        else:
                            if self.verbose:
                                self.logWrite(
                                    ('keeping non-A duplicate %s because of 1.0 '+
                                     'occupancy') % str_id )
                except:
                    self.logWrite("Error removing duplicate: "+t.lastError() )
            i+=1
        try:
            self.model.remove( to_be_removed )
            if self.verbose:
                self.logWrite('Removed %i atoms' % len( to_be_removed ) )
        except:
            if self.verbose:
                self.logWrite('No atoms with multiple occupancies to remove' )
    def replace_non_standard_AA( self, amber=0, keep=[] ):
        """
        Replace amino acids with none standard names with standard
        amino acids according to L{MU.nonStandardAA}. Unknown residue
        names are renamed to ALA with a warning.

        Note: the mutable default ``keep=[]`` is safe here because keep
        is only read, never mutated.

        @param amber: don't rename HID, HIE, HIP, CYX, NME, ACE [0]
        @type amber: 1||0
        @param keep: names of additional residues to keep
        @type keep: [ str ]
        """
        standard = MU.atomDic.keys() + keep
        if amber:
            standard.extend( ['HID', 'HIE', 'HIP', 'CYX', 'NME', 'ACE'] )
        replaced = 0
        if self.verbose:
            self.logWrite(self.model.pdbCode +
                          ': Looking for non-standard residue names...')
        resnames = self.model['residue_name']
        for i in self.model.atomRange():
            resname = resnames[i].upper()
            if resname not in standard:
                if resname in MU.nonStandardAA:
                    resnames[i] = MU.nonStandardAA[ resname ]
                    if self.verbose:
                        self.logWrite('renamed %s %i to %s' % \
                                      (resname, i, MU.nonStandardAA[ resname ]))
                else:
                    resnames[i] = 'ALA'
                    self.logWrite('Warning: unknown residue name %s %i: ' \
                                  % (resname, i ) )
                    if self.verbose:
                        self.logWrite('\t->renamed to ALA.')
                replaced += 1
        if self.verbose:
            # note: counts per-atom renames, not per-residue
            self.logWrite('Found %i atoms with non-standard residue names.'% \
                          replaced )
def __standard_res( self, resname, amber=0 ):
"""
Check if resname is a standard residue (according to L{MU.atomDic})
if not return the closest standard residue (according to
L{MU.nonStandardAA}).
@param resname: 3-letter residue name
@type resname: str
@return: name of closest standard residue or resname itself
@rtype: str
"""
if resname in MU.atomDic:
return resname
if resname in MU.nonStandardAA:
return MU.nonStandardAA[ resname ]
return resname
    def remove_non_standard_atoms( self ):
        """
        First missing standard atom triggers removal of standard atoms that
        follow in the standard order. All non-standard atoms are removed too.
        Data about standard atoms are taken from L{MU.atomDic} and synonym
        atom names are defined in L{MU.atomSynonyms}.
        @return: number of atoms removed
        @rtype: int
        """
        # per-atom removal mask (1 = remove) built residue by residue
        mask = []
        if self.verbose:
            self.logWrite("Checking content of standard amino-acids...")
        for res in self.model.resList():
            resname = self.__standard_res( res[0]['residue_name'] ).upper()
            # NOTE(review): debugging leftover, has no effect
            if resname == 'DC5':
                pass
            ## bugfix: ignore non-standard residues that have no matching
            ## standard residue
            # NOTE(review): residues NOT in MU.atomDic append no mask
            # entries at all — presumably PDBModel.remove tolerates a
            # short mask; verify against PDBModel
            if resname in MU.atomDic:
                standard = copy.copy( MU.atomDic[ resname ] )
                ## replace known synonyms by standard atom name
                for a in res:
                    n = a['name']
                    if not n in standard and MU.atomSynonyms.get(n,0) in standard:
                        a['name'] = MU.atomSynonyms[n]
                        if self.verbose:
                            self.logWrite('%s: renaming %s to %s in %s %i' %\
                                          ( self.model.pdbCode, n, a['name'],
                                            a['residue_name'], a['residue_number']))
                anames = [ a['name'] for a in res ]
                keep = 1
                ## kick out all standard atoms that follow a missing one
                ## (atoms in TOLERATE_MISSING do not trigger the cascade)
                rm = []
                for n in standard:
                    if (not n in anames) and not (n in self.TOLERATE_MISSING):
                        keep = 0
                    if not keep:
                        rm += [ n ]
                for n in rm:
                    standard.remove( n )
                ## keep only atoms that are standard (and not kicked out above)
                for a in res:
                    if a['name'] not in standard:
                        mask += [1]
                        if self.verbose:
                            self.logWrite('%s: removing atom %s in %s %i '%\
                                          ( self.model.pdbCode, a['name'],
                                            a['residue_name'], a['residue_number']))
                    else:
                        mask += [0]
        self.model.remove( mask )
        if self.verbose:
            self.logWrite('Removed ' + str(N.sum(mask)) +
                          ' atoms because they were non-standard' +
                          ' or followed a missing atom.' )
        return N.sum( mask )
    def capACE( self, model, chain, breaks=True ):
        """
        Cap N-terminal of given chain.
        Note: In order to allow the capping of chain breaks,
        the chain index is, by default, based on model.chainIndex(breaks=True),
        that means with chain break detection activated! This is not the
        default behaviour of PDBModel.chainIndex or takeChains or chainLength.
        Please use the wrapping method capTerminals() for more convenient
        handling of the index.
        @param model: model
        @type model: PDBMode
        @param chain: index of chain to be capped
        @type chain: int
        @param breaks: consider chain breaks when identifying chain boundaries
        @type breaks: bool
        @return: model with added NME capping
        @rtype : PDBModel
        @raise CappingError: if the capping would mask a chain break
        """
        if self.verbose:
            self.logWrite('Capping N-terminal of chain %i with ACE' % chain )
        c_start = model.chainIndex( breaks=breaks )
        c_end = model.chainEndIndex( breaks=breaks)
        # detect whether this fragment's boundaries are real chain ends
        # or only chain breaks — decides newChain flags at re-assembly
        Nterm_is_break = False
        Cterm_is_break = False
        if breaks:
            Nterm_is_break = c_start[chain] not in model.chainIndex()
            Cterm_is_break = c_end[chain] not in model.chainEndIndex()
        m_ace = PDBModel( self.F_ace_cap )
        # split the model into: chains before | target chain | chains after
        chains_before = model.takeChains( range(chain), breaks=breaks )
        m_chain = model.takeChains( [chain], breaks=breaks )
        chains_after = model.takeChains( range(chain+1, len(c_start)),
                                         breaks=breaks )
        m_term = m_chain.resModels()[0]
        ## we need 3 atoms for superposition, CB might mess things up but
        ## could help if there is no HN
        ## if 'HN' in m_term.atomNames():
        m_ace.remove( ['CB'] ) ## use backbone 'C' rather than CB for fitting
        ## rename overhanging residue in cap PDB
        for a in m_ace:
            if a['residue_name'] != 'ACE':
                a['residue_name'] = m_term.atoms['residue_name'][0]
            else:
                a['residue_number'] = m_term.atoms['residue_number'][0]-1
                a['chain_id'] = m_term.atoms['chain_id'][0]
                a['segment_id'] = m_term.atoms['segment_id'][0]
        ## fit cap onto first residue of chain
        m_ace = m_ace.magicFit( m_term )
        cap = m_ace.resModels()[0]
        # give the cap serial numbers directly preceding the first residue
        serial = m_term['serial_number'][0] - len(cap)
        cap['serial_number'] = range( serial, serial + len(cap) )
        ## concat cap on chain
        m_chain = cap.concat( m_chain, newChain=False )
        ## re-assemble whole model
        r = chains_before.concat( m_chain, newChain=not Nterm_is_break)
        r = r.concat( chains_after, newChain=not Cterm_is_break)
        if len(c_start) != r.lenChains( breaks=breaks ):
            raise CappingError, 'Capping ACE would mask a chain break. '+\
                  'This typically indicates a tight gap with high risk of '+\
                  'clashes and other issues.'
        return r
    def capNME( self, model, chain, breaks=True ):
        """
        Cap C-terminal of given chain.
        Note: In order to allow the capping of chain breaks,
        the chain index is, by default, based on model.chainIndex(breaks=True),
        that means with chain break detection activated! This is not the
        default behaviour of PDBModel.chainIndex or takeChains or chainLength.
        Please use the wrapping method capTerminals() for more convenient
        handling of the index.
        @param model: model
        @type model: PDBMode
        @param chain: index of chain to be capped
        @type chain: int
        @param breaks: consider chain breaks when identifying chain boundaries
        @type breaks: bool
        @return: model with added NME capping residue
        @rtype : PDBModel
        @raise CappingError: if the capping would mask a chain break
        """
        if self.verbose:
            self.logWrite('Capping C-terminal of chain %i with NME.' % chain )
        m_nme = PDBModel( self.F_nme_cap )
        c_start = model.chainIndex( breaks=breaks )
        c_end = model.chainEndIndex( breaks=breaks)
        # see capACE: distinguish real chain ends from chain breaks
        Nterm_is_break = False
        Cterm_is_break = False
        if breaks:
            Nterm_is_break = c_start[chain] not in model.chainIndex()
            Cterm_is_break = c_end[chain] not in model.chainEndIndex()
        chains_before = model.takeChains( range(chain), breaks=breaks )
        m_chain = model.takeChains( [chain], breaks=breaks )
        chains_after = model.takeChains( range(chain+1, len(c_start)),
                                         breaks=breaks )
        m_term = m_chain.resModels()[-1]
        ## rename overhanging residue in cap PDB, renumber cap residue
        for a in m_nme:
            if a['residue_name'] != 'NME':
                a['residue_name'] = m_term.atoms['residue_name'][0]
            else:
                a['residue_number'] = m_term.atoms['residue_number'][0]+1
                a['chain_id'] = m_term.atoms['chain_id'][0]
                a['segment_id'] = m_term.atoms['segment_id'][0]
        ## chain should not have any terminal O after capping
        m_chain.remove( ['OXT'] )
        ## fit cap onto last residue of chain
        m_nme = m_nme.magicFit( m_term )
        cap = m_nme.resModels()[-1]
        # cap serial numbers continue directly after the last residue
        serial = m_term['serial_number'][-1]+1
        cap['serial_number'] = range( serial, serial + len(cap) )
        ## concat cap on chain
        m_chain = m_chain.concat( cap, newChain=False )
        ## should be obsolete now
        if getattr( m_chain, '_PDBModel__terAtoms', []) != []:
            m_chain._PDBModel__terAtoms = [ len( m_chain ) - 1 ]
        assert m_chain.lenChains() == 1
        ## re-assemble whole model
        r = chains_before.concat( m_chain, newChain=not Nterm_is_break)
        r = r.concat( chains_after, newChain=not Cterm_is_break)
        if len(c_start) != r.lenChains( breaks=breaks ):
            raise CappingError, 'Capping NME would mask a chain break. '+\
                  'This typically indicates a tight gap with high risk of '+\
                  'clashes and other issues.'
        return r
    def convertChainIdsNter( self, model, chains ):
        """
        Convert normal chain ids to chain ids considering chain breaks.
        Maps each chain id to the (break-aware) chain that starts at the
        same first atom.
        @param chains: chain indices without break detection
        @return: chain indices with break detection (breaks=1)
        """
        if len(chains) == 0:
            return chains
        # first atom of each requested chain
        i = N.take( model.chainIndex(), chains )
        ## convert back to chain indices but this time including chain breaks
        return model.atom2chainIndices( i, breaks=1 )
    def convertChainIdsCter( self, model, chains ):
        """
        Convert normal chain ids to chain ids considering chain breaks.
        Maps each chain id to the (break-aware) chain that ends at the
        same last atom.
        @param chains: chain indices without break detection
        @return: chain indices with break detection (breaks=1)
        """
        if len(chains) == 0:
            return chains
        ## fetch last atom of given chains
        index = N.concatenate( (model.chainIndex(), [len(model)]) )
        i = N.take( index, N.array( chains ) + 1 ) - 1
        ## convert back to chain indices but this time including chain breaks
        return model.atom2chainIndices( i, breaks=1 )
    def unresolvedTerminals( self, model ):
        """
        Autodetect (aka "guess") which N- and C-terminals are most likely not
        the real end of each chain. This guess work is based on residue
        numbering:
        * unresolved N-terminal: a protein residue with a residue number > 1
        * unresolved C-terminal: a protein residue that does not contain either
        OXT or OT or OT1 or OT2 atoms
        @param model: PDBModel
        @return: chains with unresolved N-term, with unresolved C-term
        @rtype : ([int], [int])
        """
        c_first = model.chainIndex()
        c_last = model.chainEndIndex()
        # N-terminal candidates: first residue numbered > 1 ...
        capN = [ i for (i,pos) in enumerate(c_first)\
                 if model['residue_number'][pos] > 1 ]
        # ... unless the chain already starts with an ACE cap
        capN = [i for i in capN if model['residue_name'][c_first[i]] != 'ACE']
        capN = self.filterProteinChains( model, capN, c_first )
        capC = []
        for (i,pos) in enumerate(c_last):
            atoms = model.takeResidues(model.atom2resIndices([pos])).atomNames()
            if not( 'OXT' in atoms or 'OT' in atoms or 'OT1' in atoms or \
                    'OT2' in atoms ):
                capC += [ i ]
        capC = self.filterProteinChains( model, capC, c_last )
        return capN, capC
    #@todo filter for protein positions in breaks=1
    def filterProteinChains( self, model, chains, chainindex ):
        """Keep only those chain indices whose anchor atom is a protein atom.

        @param chainindex: atom index of each chain's anchor (first or last
                           atom, depending on caller)
        @return: filtered list of chain indices
        """
        maskProtein = model.maskProtein()
        chains = [ i for i in chains if maskProtein[ chainindex[i] ] ]
        return chains
    def capTerminals( self, auto=False, breaks=False, capN=[], capC=[] ):
        """
        Add NME and ACE capping residues to chain breaks or normal N- and
        C-terminals. Note: these capping residues contain hydrogen atoms.
        Chain indices for capN and capC arguments can be interpreted either
        with or without chain break detection enabled. For example, let's
        assume we have a two-chain protein with some missing residues (chain
        break) in the first chain:
        A: MGSKVSK---FLNAGSK
        B: FGHLAKSDAK
        Then:
        capTerminals( breaks=False, capN=[1], capC=[1]) will add N-and
        C-terminal caps to chain B.
        However:
        capTerminals( breaks=True, capN=[1], capC=[1]) will add N- and
        C-terminal caps to the second fragment of chain A.
        Note: this operation *replaces* the internal model.
        @param auto: put ACE and NME capping residue on chain breaks
        and on suspected false N- and C-termini (default: False)
        @type auto: bool
        @param breaks: switch on chain break detection before interpreting
        capN and capC
        @type breaks: False
        @param capN: indices of chains that should get ACE cap (default: [])
        @type capN: [int]
        @param capC: indices of chains that should get NME cap (default: [])
        @type capC: [int]
        """
        m = self.model
        c_len = m.lenChains()
        i_breaks = m.chainBreaks()
        if auto:
            # NOTE(review): in auto mode the converted capN/capC values are
            # immediately overwritten by unresolvedTerminals() below — the
            # two conversion calls look like dead code; confirm intent.
            if not breaks:
                capN = self.convertChainIdsNter( m, capN )
                capC = self.convertChainIdsCter( m, capC )
            breaks=True
            capN, capC = self.unresolvedTerminals( m )
            # additionally cap both sides of every chain break
            end_broken = m.atom2chainIndices( m.chainBreaks(), breaks=1 )
            capC = M.union( capC, end_broken )
            capN = M.union( capN, N.array( end_broken ) + 1 )
        # never cap non-protein chains (e.g. DNA, ligands)
        capN = self.filterProteinChains(m, capN, m.chainIndex(breaks=breaks))
        capC = self.filterProteinChains(m, capC, m.chainEndIndex(breaks=breaks))
        for i in capN:
            m = self.capACE( m, i, breaks=breaks )
            # capping must neither create nor destroy chains/breaks
            assert m.lenChains() == c_len, '%i != %i' % \
                   (m.lenChains(), c_len)
            assert len(m.chainBreaks(force=True)) == len(i_breaks)
        for i in capC:
            m = self.capNME( m, i, breaks=breaks )
            assert m.lenChains() == c_len
            assert len(m.chainBreaks(force=True)) == len(i_breaks)
        self.model = m
        return self.model
    def process( self, keep_hetatoms=0, amber=0, keep_xaa=[] ):
        """
        Remove Hetatoms, waters. Replace non-standard names.
        Remove non-standard atoms.
        @param keep_hetatoms: option
        @type keep_hetatoms: 0||1
        @param amber: don't rename amber residue names (HIE, HID, CYX,..)
        @type amber: 0||1
        @param keep_xaa: names of non-standard residues to be kept
        @type keep_xaa: [ str ]
        @return: PDBModel (reference to internal)
        @rtype: PDBModel
        @raise CleanerError: if something doesn't go as expected ...
        """
        try:
            if not keep_hetatoms:
                self.model.remove( self.model.maskHetatm() )
            # waters and hydrogens are always removed
            self.model.remove( self.model.maskH2O() )
            self.model.remove( self.model.maskH() )
            self.remove_multi_occupancies()
            self.replace_non_standard_AA( amber=amber, keep=keep_xaa )
            self.remove_non_standard_atoms()
        except KeyboardInterrupt, why:
            # let Ctrl-C through rather than wrapping it in CleanerError
            raise KeyboardInterrupt( why )
        except Exception, why:
            self.logWrite('Error: '+t.lastErrorTrace())
            raise CleanerError( 'Error cleaning model: %r' % why )
        return self.model
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
    """Test class for PDBCleaner (requires the Biskit test data tree)."""

    def prepare(self):
        # imports kept for parity with other Biskit test modules;
        # nothing is set up here
        from Biskit.LogFile import LogFile
        import tempfile

    def test_PDBCleaner( self ):
        """PDBCleaner general test"""
        ## Loading PDB...
        self.c = PDBCleaner( t.testRoot() + '/rec/1A2P_rec_original.pdb',
                             log=self.log,
                             verbose=self.local)
        self.m = self.c.process()
        self.assertAlmostEqual( self.m.mass(), 34029.0115499993, 7 )

    def test_DNACleaning( self ):
        """PDBCleaner DNA test"""
        ## Loading PDB...
        # NOTE(review): path lacks the leading '/' used by the other tests;
        # presumably t.testRoot() ends with a separator — verify
        self.c = PDBCleaner( t.testRoot() + 'amber/entropy/0_com.pdb',
                             log=self.log, verbose=self.local )
        self.dna = self.c.process(amber=True)
        self.assertAlmostEqual( self.dna.mass(), 26953.26, 1 )

    def test_Capping( self ):
        """PDBCleaner.capTerminals test"""
        ## Loading PDB...
        self.model = PDBModel(t.testRoot() + '/rec/1A2P_rec_original.pdb')
        self.c = PDBCleaner( self.model, log=self.log, verbose=self.local )
        # capping with no explicit chains and no real breaks is a no-op
        self.m2 = self.c.capTerminals( breaks=True )
        self.assert_( self.m2.atomNames() == self.model.atomNames() )
        # introduce an artificial chain break by deleting residues
        self.m3 = self.model.clone()
        self.m3.removeRes( [10,11,12,13,14,15] )
        self.m4 = self.m3.clone()
        self.c = PDBCleaner( self.m3, log=self.log, verbose=self.local )
        self.m3 = self.c.capTerminals( breaks=True, capC=[0], capN=[0,1])
        self.assertEqual( self.m3.takeChains([0]).sequence()[:18],
                          'XVINTFDGVADXXKLPDN' )
        if self.local:
            self.log.add( '\nTesting automatic chain capping...\n' )
        # auto-detection should find and cap the same positions
        self.c = PDBCleaner( self.m4, log=self.log, verbose=self.local )
        self.m4 = self.c.capTerminals( auto=True )
        self.assertEqual( self.m4.takeChains([0]).sequence()[:18],
                          'XVINTFDGVADXXKLPDN' )

    def test_capping_extra( self ):
        """PDBCleaner.capTerminals extra challenge"""
        self.m2 = PDBModel( t.testRoot() + '/pdbclean/foldx_citche.pdb' )
        self.c = PDBCleaner( self.m2, verbose=self.local, log=self.log)
        # this structure has a tight gap: capping must refuse with an error
        self.assertRaises(CappingError, self.c.capTerminals, auto=True)
        if self.local:
            self.log.add('OK: CappingError has been raised indicating clash.' )
        self.assertEqual( len(self.m2.takeChains([1]).chainBreaks()), 1 )
## run this module's Biskit test suite when executed directly
if __name__ == '__main__':
    BT.localTest()
|
ostrokach/biskit
|
Biskit/PDBCleaner.py
|
Python
|
gpl-3.0
| 27,041
|
[
"Amber"
] |
190614de849baac4402e04ec17da356c3bbe134ceb5c668041373f228ca2f876
|
# Copyright 2006 by Sean Davis. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# $Id: __init__.py,v 1.12 2009-04-24 12:03:45 mdehoon Exp $
# Sean Davis <sdavis2 at mail dot nih dot gov>
# National Cancer Institute
# National Institutes of Health
# Bethesda, MD, USA
#
"""Parse Unigene flat file format files such as the Hs.data file.
Here is an overview of the flat file format that this parser deals with:
Line types/qualifiers:
ID UniGene cluster ID
TITLE Title for the cluster
GENE Gene symbol
CYTOBAND Cytological band
EXPRESS Tissues of origin for ESTs in cluster
RESTR_EXPR Single tissue or development stage contributes
more than half the total EST frequency for this gene.
GNM_TERMINUS genomic confirmation of presence of a 3' terminus;
T if a non-templated polyA tail is found among
a cluster's sequences; else
I if templated As are found in genomic sequence or
S if a canonical polyA signal is found on
the genomic sequence
GENE_ID Entrez gene identifier associated with at least one
sequence in this cluster;
to be used instead of LocusLink.
LOCUSLINK LocusLink identifier associated with at least one
sequence in this cluster;
deprecated in favor of GENE_ID
HOMOL Homology;
CHROMOSOME Chromosome. For plants, CHROMOSOME refers to mapping
on the arabidopsis genome.
STS STS
ACC= GenBank/EMBL/DDBJ accession number of STS
[optional field]
UNISTS= identifier in NCBI's UNISTS database
TXMAP Transcript map interval
MARKER= Marker found on at least one sequence in this
cluster
RHPANEL= Radiation Hybrid panel used to place marker
PROTSIM Protein Similarity data for the sequence with
highest-scoring protein similarity in this cluster
ORG= Organism
PROTGI= Sequence GI of protein
PROTID= Sequence ID of protein
PCT= Percent alignment
ALN= length of aligned region (aa)
SCOUNT Number of sequences in the cluster
SEQUENCE Sequence
ACC= GenBank/EMBL/DDBJ accession number of sequence
NID= Unique nucleotide sequence identifier (gi)
PID= Unique protein sequence identifier (used for
non-ESTs)
CLONE= Clone identifier (used for ESTs only)
END= End (5'/3') of clone insert read (used for
ESTs only)
LID= Library ID; see Hs.lib.info for library name
and tissue
MGC= 5' CDS-completeness indicator; if present, the
clone associated with this sequence is believed
CDS-complete. A value greater than 511 is the gi
of the CDS-complete mRNA matched by the EST,
otherwise the value is an indicator of the
reliability of the test indicating CDS
completeness; higher values indicate more
reliable CDS-completeness predictions.
SEQTYPE= Description of the nucleotide sequence.
Possible values are mRNA, EST and HTC.
TRACE= The Trace ID of the EST sequence, as provided by
NCBI Trace Archive
"""
class SequenceLine(object):
    """Store the information for one SEQUENCE line from a Unigene file.

    Initialize with the text part of the SEQUENCE line, or nothing.

    Attributes and descriptions (access as LOWER CASE):

    ACC=         GenBank/EMBL/DDBJ accession number of sequence
    NID=         Unique nucleotide sequence identifier (gi)
    PID=         Unique protein sequence identifier (used for non-ESTs)
    CLONE=       Clone identifier (used for ESTs only)
    END=         End (5'/3') of clone insert read (used for ESTs only)
    LID=         Library ID; see Hs.lib.info for library name and tissue
    MGC=         5' CDS-completeness indicator; if present,
                 the clone associated with this sequence
                 is believed CDS-complete. A value greater than 511
                 is the gi of the CDS-complete mRNA matched by the EST,
                 otherwise the value is an indicator of the reliability
                 of the test indicating CDS completeness;
                 higher values indicate more reliable CDS-completeness
                 predictions.
    SEQTYPE=     Description of the nucleotide sequence. Possible values
                 are mRNA, EST and HTC.
    TRACE=       The Trace ID of the EST sequence, as provided by NCBI
                 Trace Archive
    """

    def __init__(self, text=None):
        self.acc = ''
        self.nid = ''
        self.lid = ''
        self.pid = ''
        self.clone = ''
        self.image = ''       # IMAGE consortium clone number, if any
        self.is_image = False
        self.end = ''
        self.mgc = ''
        self.seqtype = ''
        self.trace = ''
        if text is not None:  # idiom fix: was 'not text==None'
            self.text = text
            self._init_from_text(text)

    def _init_from_text(self, text):
        # Fields are semicolon-separated KEY=VALUE pairs.
        for part in text.split('; '):
            # Split only on the first '=' so values may contain '='.
            key, val = part.split("=", 1)
            if key == 'CLONE' and val[:5] == 'IMAGE':
                # IMAGE clones look like 'IMAGE:123456'
                self.is_image = True
                self.image = val[6:]
            setattr(self, key.lower(), val)

    def __repr__(self):
        return self.text
class ProtsimLine(object):
    """Store the information for one PROTSIM line from a Unigene file.

    Initialize with the text part of the PROTSIM line, or nothing.

    Attributes and descriptions (access as LOWER CASE):

    ORG=         Organism
    PROTGI=      Sequence GI of protein
    PROTID=      Sequence ID of protein
    PCT=         Percent alignment
    ALN=         length of aligned region (aa)
    """

    def __init__(self, text=None):
        self.org = ''
        self.protgi = ''
        self.protid = ''
        self.pct = ''
        self.aln = ''
        if text is not None:  # idiom fix: was 'not text==None'
            self.text = text
            self._init_from_text(text)

    def _init_from_text(self, text):
        # Fields are semicolon-separated KEY=VALUE pairs; split on the
        # first '=' only so values may contain '='.
        for part in text.split('; '):
            key, val = part.split("=", 1)
            setattr(self, key.lower(), val)

    def __repr__(self):
        return self.text
class STSLine(object):
    """Store the information for one STS line from a Unigene file.

    Initialize with the text part of the STS line, or nothing.

    Attributes and descriptions (access as LOWER CASE):

    ACC=         GenBank/EMBL/DDBJ accession number of STS [optional field]
    UNISTS=      identifier in NCBI's UNISTS database
    """

    def __init__(self, text=None):
        self.acc = ''
        self.unists = ''
        if text is not None:  # idiom fix: was 'not text==None'
            self.text = text
            self._init_from_text(text)

    def _init_from_text(self, text):
        # Fields are space-separated KEY=VALUE pairs; split on the
        # first '=' only so values may contain '='.
        for part in text.split(' '):
            key, val = part.split("=", 1)
            setattr(self, key.lower(), val)

    def __repr__(self):
        return self.text
class Record(object):
    """Store one Unigene cluster record.

    Fields (set by the parser):

    ID           -- ID line, e.g. 'Hs.95990'
    species      -- species prefix of the ID ('Hs', 'Bt', ...)
    title        -- TITLE line
    symbol       -- GENE line
    cytoband     -- CYTOBAND line
    express      -- EXPRESS line, list of strings
    restr_expr   -- RESTR_EXPR line
    gnm_terminus -- GNM_TERMINUS line
    gene_id      -- GENE_ID line
    locuslink    -- LOCUSLINK line
    homol        -- HOMOL line
    chromosome   -- CHROMOSOME line
    protsim      -- PROTSIM entries, list of ProtsimLine
    sequence     -- SEQUENCE entries, list of SequenceLine
    sts          -- STS entries, list of STSLine
    txmap        -- TXMAP entries, list of TXMap entries
    """

    # attribute names that default to an empty string
    _SCALAR_FIELDS = ('ID', 'species', 'title', 'symbol', 'cytoband',
                      'restr_expr', 'gnm_terminus', 'gene_id', 'locuslink',
                      'homol', 'chromosome')
    # attribute names that default to an empty list
    _LIST_FIELDS = ('express', 'protsim', 'sequence', 'sts', 'txmap')

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, '')
        for name in self._LIST_FIELDS:
            setattr(self, name, [])

    def __repr__(self):
        return "<{0}> {1} {2}\n{3}".format(self.__class__.__name__,
                                           self.ID, self.symbol, self.title)
def parse(handle):
    """Yield successive Record objects parsed from *handle*."""
    record = _read(handle)
    while record:
        yield record
        record = _read(handle)
def read(handle):
    """Read exactly one UniGene record from *handle*.

    Raises ValueError if the handle contains no record, or if it
    contains more than one record.
    """
    record = _read(handle)
    if not record:
        # BUGFIX: messages previously said 'SwissProt' (copy-paste error)
        raise ValueError("No UniGene record found")
    # We should have reached the end of the record by now
    remainder = handle.read()
    if remainder:
        raise ValueError("More than one UniGene record found")
    return record
# Everything below is private
def _read(handle):
    """Read one UniGene record from *handle* (private helper).

    Returns a Record, or None when the handle holds no further records.
    Raises ValueError on malformed input.
    """
    UG_INDENT = 12   # the tag occupies the first 12 columns of each line
    record = None
    scount = None    # expected number of SEQUENCE lines (from SCOUNT)
    for line in handle:
        tag, value = line[:UG_INDENT].rstrip(), line[UG_INDENT:].rstrip()
        line = line.rstrip()
        if tag == "ID":
            record = Record()
            record.ID = value
            # cluster IDs look like 'Hs.95990'; the prefix is the species
            record.species = record.ID.split('.')[0]
        elif tag == "TITLE":
            record.title = value
        elif tag == "GENE":
            record.symbol = value
        elif tag == "GENE_ID":
            record.gene_id = value
        elif tag == "LOCUSLINK":
            record.locuslink = value
        elif tag == "HOMOL":
            if value == "YES":
                record.homol = True
            elif value == "NO":
                # BUGFIX: 'NO' previously also set homol to True
                record.homol = False
            else:
                raise ValueError("Cannot parse HOMOL line %s" % line)
        elif tag == "EXPRESS":
            record.express = [word.strip() for word in value.split("|")]
        elif tag == "RESTR_EXPR":
            record.restr_expr = [word.strip() for word in value.split("|")]
        elif tag == "CHROMOSOME":
            record.chromosome = value
        elif tag == "CYTOBAND":
            record.cytoband = value
        elif tag == "PROTSIM":
            record.protsim.append(ProtsimLine(value))
        elif tag == "SCOUNT":
            scount = int(value)
        elif tag == "SEQUENCE":
            record.sequence.append(SequenceLine(value))
        elif tag == "STS":
            record.sts.append(STSLine(value))
        elif tag == '//':
            # end-of-record marker; verify the sequence count when given
            # (previously a missing SCOUNT line raised a NameError here)
            if scount is not None and len(record.sequence) != scount:
                raise ValueError(
                    "The number of sequences specified in the record"
                    " (%d) does not agree with the number of sequences"
                    " found (%d)" % (scount, len(record.sequence)))
            return record
        else:
            raise ValueError("Unknown tag %s" % tag)
    if record:
        raise ValueError("Unexpected end of stream.")
|
bryback/quickseq
|
genescript/Bio/UniGene/__init__.py
|
Python
|
mit
| 12,406
|
[
"Biopython"
] |
b7a157fad97212817adcf4ea8caca7250c18d0486ca0e81ce2029ef17fad0cd5
|
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import six
from .tree import Node
class Q(Node):
    """
    Encapsulates filters as objects that can then be combined logically
    (using ``&`` and ``|``).
    """

    # Connection types
    AND = 'AND'
    OR = 'OR'
    default = AND

    def __init__(self, *args, **kwargs):
        # positional children first, then keyword filters as (key, value)
        children = list(args)
        children.extend(six.iteritems(kwargs))
        super(Q, self).__init__(children=children)

    def _combine(self, other: 'Q', conn: str) -> 'Q':
        # Join *self* and *other* under the connector *conn* in a new node.
        if not isinstance(other, Q):
            raise TypeError(other)
        if len(self.children) < 1:
            self.connector = conn
        combined = type(self)()
        combined.connector = conn
        combined.add(self, conn)
        combined.add(other, conn)
        return combined

    def __or__(self, other: 'Q'):
        return self._combine(other, self.OR)

    def __and__(self, other: 'Q'):
        return self._combine(other, self.AND)

    def __invert__(self):
        # Wrap self in a fresh node and negate it.
        negated = type(self)()
        negated.add(self, self.AND)
        negated.negate()
        return negated
|
Karaage-Cluster/python-tldap
|
tldap/query_utils.py
|
Python
|
gpl-3.0
| 1,740
|
[
"Brian"
] |
f753d2496be86381a6c9c263528e871aba13f36ffa2fb0b9c499611c266bbb9a
|
"""Sub-classes and wrappers for vtk.vtkPointSet."""
from textwrap import dedent
import pathlib
import logging
import os
import warnings
import numbers
import collections
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import abstract_class
from pyvista.utilities.cells import (CellArray, numpy_to_idarr,
generate_cell_offsets,
create_mixed_cells,
get_mixed_cells)
from .dataset import DataSet
from .filters import (PolyDataFilters, UnstructuredGridFilters,
StructuredGridFilters, _get_output)
from ..utilities.fileio import get_ext
from .errors import DeprecationError
# Module-level logger; raised to CRITICAL so this module stays quiet
# unless explicitly re-enabled.
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
class PointSet(DataSet):
    """PyVista's equivalent of vtk.vtkPointSet.

    This holds methods common to PolyData and UnstructuredGrid.
    """

    def center_of_mass(self, scalars_weight=False):
        """Return the coordinates for the center of mass of the mesh.

        Parameters
        ----------
        scalars_weight : bool, optional
            Flag for using the mesh scalars as weights. Defaults to False.

        Returns
        -------
        center : np.ndarray, float
            Coordinates for the center of mass.

        """
        alg = _vtk.vtkCenterOfMass()
        alg.SetInputDataObject(self)
        alg.SetUseScalarsAsWeights(scalars_weight)
        alg.Update()
        return np.array(alg.GetCenter())

    def shallow_copy(self, to_copy):
        """Do a shallow copy the pointset."""
        # Set default points if needed
        if not to_copy.GetPoints():
            to_copy.SetPoints(_vtk.vtkPoints())
        return DataSet.shallow_copy(self, to_copy)

    def remove_cells(self, ind, inplace=True):
        """Remove cells.

        Parameters
        ----------
        ind : iterable
            Cell indices to be removed.  The array can also be a
            boolean array of the same size as the number of cells.

        inplace : bool, optional
            When ``True`` (default) the mesh itself is modified and
            returned; when ``False`` a modified copy is returned.

        Examples
        --------
        Remove first 1000 cells from an unstructured grid.

        >>> import pyvista
        >>> letter_a = pyvista.examples.download_letter_a()
        >>> trimmed = letter_a.remove_cells(range(1000))

        """
        if isinstance(ind, np.ndarray):
            if ind.dtype == np.bool_ and ind.size != self.n_cells:
                # BUGFIX: message was missing its closing parenthesis
                raise ValueError('Boolean array size must match the '
                                 f'number of cells ({self.n_cells})')
        # mark the selected cells as duplicates, then strip ghost cells
        ghost_cells = np.zeros(self.n_cells, np.uint8)
        ghost_cells[ind] = _vtk.vtkDataSetAttributes.DUPLICATECELL

        if inplace:
            target = self
        else:
            target = self.copy()

        target.cell_arrays[_vtk.vtkDataSetAttributes.GhostArrayName()] = ghost_cells
        target.RemoveGhostCells()
        return target
class PolyData(_vtk.vtkPolyData, PointSet, PolyDataFilters):
    """Extend the functionality of a vtk.vtkPolyData object.

    Can be initialized in several ways:

    - Create an empty mesh
    - Initialize from a vtk.vtkPolyData
    - Using vertices
    - Using vertices and faces
    - From a file

    Parameters
    ----------
    var_inp : vtk.vtkPolyData, str, sequence, optional
        Flexible input type.  Can be a ``vtk.vtkPolyData``, in which case
        this PolyData object will be copied if ``deep=True`` and will
        be a shallow copy if ``deep=False``.

        Also accepts a path, which may be local path as in
        ``'my_mesh.stl'`` or global path like ``'/tmp/my_mesh.ply'``
        or ``'C:/Users/user/my_mesh.ply'``.

        Otherwise, this must be a points array or list containing one
        or more points.  Each point must have 3 dimensions.

    faces : sequence, optional
        Face connectivity array.  Faces must contain padding
        indicating the number of points in the face.  For example, the
        two faces ``[10, 11, 12]`` and ``[20, 21, 22, 23]`` will be
        represented as ``[3, 10, 11, 12, 4, 20, 21, 22, 23]``.  This
        lets you have an arbitrary number of points per face.

        When not including the face connectivity array, each point
        will be assigned to a single vertex.  This is used for point
        clouds that have no connectivity.

    n_faces : int, optional
        Number of faces in the ``faces`` connectivity array.  While
        optional, setting this speeds up the creation of the
        ``PolyData``.

    lines : sequence, optional
        The line connectivity array.  Like ``faces``, this array
        requires padding indicating the number of points in a line
        segment.  For example, the two line segments ``[0, 1]`` and
        ``[1, 2, 3, 4]`` will be represented as
        ``[2, 0, 1, 4, 1, 2, 3, 4]``.

    n_lines : int, optional
        Number of lines in the ``lines`` connectivity array.  While
        optional, setting this speeds up the creation of the
        ``PolyData``.

    deep : bool, optional
        Whether to copy the inputs, or to create a mesh from them
        without copying them.  Setting ``deep=True`` ensures that the
        original arrays can be modified outside the mesh without
        affecting the mesh.  Default is ``False``.

    Examples
    --------
    >>> import vtk
    >>> import numpy as np
    >>> from pyvista import examples
    >>> import pyvista

    Create an empty mesh

    >>> mesh = pyvista.PolyData()

    Initialize from a ``vtk.vtkPolyData`` object

    >>> vtkobj = vtk.vtkPolyData()
    >>> mesh = pyvista.PolyData(vtkobj)

    Initialize from just vertices

    >>> vertices = np.array([[0, 0, 0], [1, 0, 0], [1, 0.5, 0], [0, 0.5, 0]])
    >>> mesh = pyvista.PolyData(vertices)

    Initialize from vertices and faces

    >>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2]])
    >>> mesh = pyvista.PolyData(vertices, faces)

    Initialize from vertices and lines

    >>> lines = np.hstack([[2, 0, 1], [2, 1, 2]])
    >>> mesh = pyvista.PolyData(vertices, lines=lines)

    Initialize from a filename

    >>> mesh = pyvista.PolyData(examples.antfile)

    """

    # file extension -> VTK writer class used by ``save``
    _WRITERS = {'.ply': _vtk.vtkPLYWriter,
                '.vtp': _vtk.vtkXMLPolyDataWriter,
                '.stl': _vtk.vtkSTLWriter,
                '.vtk': _vtk.vtkPolyDataWriter}

    def __init__(self, var_inp=None, faces=None, n_faces=None, lines=None,
                 n_lines=None, deep=False, force_ext=None) -> None:
        """Initialize the polydata."""
        local_parms = locals()
        super().__init__()

        # allow empty input
        if var_inp is None:
            return

        # filename
        opt_kwarg = ['faces', 'n_faces', 'lines', 'n_lines']
        if isinstance(var_inp, (str, pathlib.Path)):
            for kwarg in opt_kwarg:
                if local_parms[kwarg]:
                    raise ValueError('No other arguments should be set when first '
                                     'parameter is a string')
            self._from_file(var_inp, force_ext=force_ext)  # is filename
            return

        # PolyData-like
        if isinstance(var_inp, _vtk.vtkPolyData):
            for kwarg in opt_kwarg:
                if local_parms[kwarg]:
                    raise ValueError('No other arguments should be set when first '
                                     'parameter is a PolyData')
            if deep:
                self.deep_copy(var_inp)
            else:
                self.shallow_copy(var_inp)
            return

        # First parameter is points
        if isinstance(var_inp, (np.ndarray, list)):
            self.SetPoints(pyvista.vtk_points(var_inp, deep=deep))
        else:
            # BUGFIX: message listed the nonexistent class 'vtk.PolyData'
            msg = f"""
                Invalid Input type:

                Expected first argument to be either a:
                - vtk.vtkPolyData
                - pyvista.PolyData
                - numeric numpy.ndarray (1 or 2 dimensions)
                - List (flat or nested with 3 points per vertex)

                Instead got: {type(var_inp)}"""
            raise TypeError(dedent(msg.strip('\n')))

        # At this point, points have been setup, add faces and/or lines
        if faces is None and lines is None:
            # one cell per point (point cloud case)
            verts = self._make_vertex_cells(self.n_points)
            self.verts = CellArray(verts, self.n_points, deep)

        elif faces is not None:
            # here we use CellArray since we must specify deep and n_faces
            self.faces = CellArray(faces, n_faces, deep)

        # can always set lines
        if lines is not None:
            # here we use CellArray since we must specify deep and n_lines
            self.lines = CellArray(lines, n_lines, deep)

    def _post_file_load_processing(self):
        """Execute after loading a PolyData from file."""
        # When loading files with just point arrays, create and
        # set the polydata vertices
        if self.n_points > 0 and self.n_cells == 0:
            verts = self._make_vertex_cells(self.n_points)
            self.verts = CellArray(verts, self.n_points, deep=False)

    def __repr__(self):
        """Return the standard representation."""
        return DataSet.__repr__(self)

    def __str__(self):
        """Return the standard str representation."""
        return DataSet.__str__(self)

    @staticmethod
    def _make_vertex_cells(npoints):
        """Return a padded vertex cell array with one vertex cell per point."""
        cells = np.empty((npoints, 2), dtype=pyvista.ID_TYPE)
        cells[:, 0] = 1
        cells[:, 1] = np.arange(npoints, dtype=pyvista.ID_TYPE)
        return cells

    @property
    def verts(self):
        """Get the vertex cells."""
        return _vtk.vtk_to_numpy(self.GetVerts().GetData())

    @verts.setter
    def verts(self, verts):
        """Set the vertex cells."""
        if isinstance(verts, CellArray):
            self.SetVerts(verts)
        else:
            self.SetVerts(CellArray(verts))

    @property
    def lines(self):
        """Return a pointer to the lines as a numpy object."""
        return _vtk.vtk_to_numpy(self.GetLines().GetData()).ravel()

    @lines.setter
    def lines(self, lines):
        """Set the lines of the polydata."""
        if isinstance(lines, CellArray):
            self.SetLines(lines)
        else:
            self.SetLines(CellArray(lines))

    @property
    def faces(self):
        """Return a pointer to the faces as a numpy object."""
        return _vtk.vtk_to_numpy(self.GetPolys().GetData())

    @faces.setter
    def faces(self, faces):
        """Set the face cells."""
        if isinstance(faces, CellArray):
            self.SetPolys(faces)
        else:
            self.SetPolys(CellArray(faces))

    def is_all_triangles(self):
        """Return ``True`` if all the faces of the ``PolyData`` are triangles."""
        # Need to make sure there are only face cells and no lines/verts
        faces = self.faces  # grab once as this takes time to build
        if not len(faces) or len(self.lines) > 0 or len(self.verts) > 0:
            return False

        # All we have are faces, check if all faces are indeed triangles
        if faces.size % 4 == 0:
            return (faces[::4] == 3).all()
        return False

    def __sub__(self, cutting_mesh):
        """Subtract two meshes."""
        return self.boolean_cut(cutting_mesh)

    @property
    def n_faces(self):
        """Return the number of cells.

        Alias for ``n_cells``.

        """
        return self.n_cells

    @property
    def number_of_faces(self):  # pragma: no cover
        """Return the number of cells."""
        # BUGFIX: corrected the misspelling "depreciated" in the message
        raise DeprecationError('``number_of_faces`` has been deprecated. '
                               'Please use ``n_faces``')

    def save(self, filename, binary=True):
        """Write a surface mesh to disk.

        Written file may be an ASCII or binary ply, stl, or vtk mesh
        file.  If ply or stl format is chosen, the face normals are
        computed in place to ensure the mesh is properly saved.

        Parameters
        ----------
        filename : str
            Filename of mesh to be written.  File type is inferred from
            the extension of the filename.  Can be one of the following
            types (.ply, .stl, .vtk).

        binary : bool, optional
            Writes the file as binary when True and ASCII when False.

        Notes
        -----
        Binary files write much faster than ASCII and have a smaller
        file size.

        """
        filename = os.path.abspath(os.path.expanduser(str(filename)))
        ftype = get_ext(filename)
        # Recompute normals prior to save.  Corrects a bug were some
        # triangular meshes are not saved correctly
        # NOTE(review): get_ext appears to return the extension with a
        # leading dot (cf. the _WRITERS keys) — if so, this comparison
        # never matches and should read ['.stl', '.ply'].  Confirm before
        # changing.
        if ftype in ['stl', 'ply']:
            self.compute_normals(inplace=True)
        super().save(filename, binary)

    @property
    def area(self):
        """Return the mesh surface area.

        Returns
        -------
        area : float
            Total area of the mesh.

        """
        areas = self.compute_cell_sizes(length=False, area=True, volume=False,)["Area"]
        return np.sum(areas)

    @property
    def volume(self):
        """Return the mesh volume.

        This will throw a VTK error/warning if not a closed surface.

        Returns
        -------
        volume : float
            Total volume of the mesh.

        """
        mprop = _vtk.vtkMassProperties()
        mprop.SetInputData(self.triangulate())
        return mprop.GetVolume()

    @property
    def point_normals(self):
        """Return the point normals."""
        mesh = self.compute_normals(cell_normals=False, inplace=False)
        return mesh.point_arrays['Normals']

    @property
    def cell_normals(self):
        """Return the cell normals."""
        mesh = self.compute_normals(point_normals=False, inplace=False)
        return mesh.cell_arrays['Normals']

    @property
    def face_normals(self):
        """Return the cell normals.

        Alias for ``cell_normals``.
        """
        return self.cell_normals

    @property
    def obbTree(self):
        """Return the obbTree of the polydata.

        An obbTree is an object to generate oriented bounding box (OBB)
        trees.  An oriented bounding box is a bounding box that does not
        necessarily line up along coordinate axes.  The OBB tree is a
        hierarchical tree structure of such boxes, where deeper levels of
        OBB confine smaller regions of space.
        """
        # lazily construct and cache the locator on first access
        if not hasattr(self, '_obbTree'):
            self._obbTree = _vtk.vtkOBBTree()
            self._obbTree.SetDataSet(self)
            self._obbTree.BuildLocator()

        return self._obbTree

    @property
    def n_open_edges(self):
        """Return the number of open edges on this mesh."""
        alg = _vtk.vtkFeatureEdges()
        alg.FeatureEdgesOff()
        alg.BoundaryEdgesOn()
        alg.NonManifoldEdgesOn()
        alg.SetInputDataObject(self)
        alg.Update()
        return alg.GetOutput().GetNumberOfCells()

    def __del__(self):
        """Delete the object."""
        # drop the cached OBB tree so its VTK references are released
        if hasattr(self, '_obbTree'):
            del self._obbTree
@abstract_class
class PointGrid(PointSet):
    """Class in common with structured and unstructured grids."""

    def __init__(self, *args, **kwargs) -> None:
        """Initialize the point grid."""
        super().__init__()

    def plot_curvature(self, curv_type='mean', **kwargs):
        """Plot the curvature of the external surface of the grid.

        Parameters
        ----------
        curv_type : str, optional
            One of the following strings indicating curvature types:
            ``'mean'``, ``'gaussian'``, ``'maximum'`` or ``'minimum'``.

        **kwargs : optional
            Optional keyword arguments.  See help(pyvista.plot).

        Returns
        -------
        cpos : list
            Camera position, focal point, and view up.  Used for
            storing and setting the camera view.

        """
        # curvature is only defined on a triangulated surface
        outer_surface = self.extract_surface().triangulate()
        return outer_surface.plot_curvature(curv_type, **kwargs)

    @property
    def volume(self):
        """Compute the volume of the point grid.

        This extracts the external surface and computes the interior
        volume.
        """
        return self.extract_surface().triangulate().volume
class UnstructuredGrid(_vtk.vtkUnstructuredGrid, PointGrid, UnstructuredGridFilters):
"""
Extends the functionality of a vtk.vtkUnstructuredGrid object.
Can be initialized by the following:
- Creating an empty grid
- From a vtk.vtkPolyData object
- From cell, offset, and node arrays
- From a file
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> import vtk
Create an empty grid
>>> grid = pyvista.UnstructuredGrid()
Copy a vtkUnstructuredGrid
>>> vtkgrid = vtk.vtkUnstructuredGrid()
>>> grid = pyvista.UnstructuredGrid(vtkgrid) # Initialize from a vtkUnstructuredGrid
>>> # from arrays (vtk9)
>>> #grid = pyvista.UnstructuredGrid(cells, celltypes, points)
>>> # from arrays (vtk<9)
>>> #grid = pyvista.UnstructuredGrid(offset, cells, celltypes, points)
From a string filename
>>> grid = pyvista.UnstructuredGrid(examples.hexbeamfile)
"""
_WRITERS = {'.vtu': _vtk.vtkXMLUnstructuredGridWriter,
'.vtk': _vtk.vtkUnstructuredGridWriter}
    def __init__(self, *args, **kwargs) -> None:
        """Initialize the unstructured grid.

        Accepts: no arguments (empty grid); another unstructured grid;
        a filename; a structured grid; a cells dictionary plus a points
        array; or raw (cells, celltypes, points) arrays — with a leading
        offset array on VTK < 9.
        """
        super().__init__()
        deep = kwargs.pop('deep', False)

        if not len(args):
            return
        if len(args) == 1:
            # single argument: copy, read from file, or convert
            if isinstance(args[0], _vtk.vtkUnstructuredGrid):
                if deep:
                    self.deep_copy(args[0])
                else:
                    self.shallow_copy(args[0])

            elif isinstance(args[0], (str, pathlib.Path)):
                self._from_file(args[0], **kwargs)

            elif isinstance(args[0], _vtk.vtkStructuredGrid):
                # convert via vtkAppendFilter, whose output is unstructured
                vtkappend = _vtk.vtkAppendFilter()
                vtkappend.AddInputData(args[0])
                vtkappend.Update()
                self.shallow_copy(vtkappend.GetOutput())

            else:
                itype = type(args[0])
                raise TypeError(f'Cannot work with input type {itype}')

        # Cell dictionary creation
        elif len(args) == 2 and isinstance(args[0], dict) and isinstance(args[1], np.ndarray):
            self._from_cells_dict(args[0], args[1], deep)
            self._check_for_consistency()

        elif len(args) == 3: # and VTK9:
            # (cells, celltypes, points) -- VTK 9 style, no offset array
            arg0_is_arr = isinstance(args[0], np.ndarray)
            arg1_is_arr = isinstance(args[1], np.ndarray)
            arg2_is_arr = isinstance(args[2], np.ndarray)

            if all([arg0_is_arr, arg1_is_arr, arg2_is_arr]):
                self._from_arrays(None, args[0], args[1], args[2], deep)
                self._check_for_consistency()
            else:
                raise TypeError('All input types must be np.ndarray')

        elif len(args) == 4:
            # (offset, cells, celltypes, points) -- legacy VTK < 9 style
            arg0_is_arr = isinstance(args[0], np.ndarray)
            arg1_is_arr = isinstance(args[1], np.ndarray)
            arg2_is_arr = isinstance(args[2], np.ndarray)
            arg3_is_arr = isinstance(args[3], np.ndarray)

            if all([arg0_is_arr, arg1_is_arr, arg2_is_arr, arg3_is_arr]):
                self._from_arrays(args[0], args[1], args[2], args[3], deep)
                self._check_for_consistency()
            else:
                raise TypeError('All input types must be np.ndarray')

        else:
            err_msg = 'Invalid parameters. Initialization with arrays ' +\
                      'requires the following arrays:\n'
            if _vtk.VTK9:
                raise TypeError(err_msg + '`cells`, `cell_type`, `points`')
            else:
                raise TypeError(err_msg + '(`offset` optional), `cells`, `cell_type`, `points`')

    def __repr__(self):
        """Return the standard representation."""
        return DataSet.__repr__(self)

    def __str__(self):
        """Return the standard str representation."""
        return DataSet.__str__(self)

    def _from_cells_dict(self, cells_dict, points, deep=True):
        # Build the grid from a {vtk_cell_type: ndarray} mapping; VTK 9
        # needs no offset array, older VTK does.
        if points.ndim != 2 or points.shape[-1] != 3:
            raise ValueError("Points array must be a [M, 3] array")

        nr_points = points.shape[0]
        if _vtk.VTK9:
            cell_types, cells = create_mixed_cells(cells_dict, nr_points)
            self._from_arrays(None, cells, cell_types, points, deep=deep)
        else:
            cell_types, cells, offset = create_mixed_cells(cells_dict, nr_points)
            self._from_arrays(offset, cells, cell_types, points, deep=deep)
    def _from_arrays(self, offset, cells, cell_type, points, deep=True):
        """Create VTK unstructured grid from numpy arrays.

        Parameters
        ----------
        offset : np.ndarray dtype=np.int64
            Array indicating the start location of each cell in the cells
            array.  Set to ``None`` when using VTK 9+.

        cells : np.ndarray dtype=np.int64
            Array of cells.  Each cell contains the number of points in
            the cell and the node numbers of the cell.

        cell_type : np.uint8
            Cell types of each cell.  Each cell type numbers can be found
            from vtk documentation.  See example below.

        points : np.ndarray
            Numpy array containing point locations.

        Examples
        --------
        >>> import numpy as np
        >>> import vtk
        >>> import pyvista
        >>> offset = np.array([0, 9])
        >>> cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
        >>> cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int8)
        >>> cell1 = np.array([[0, 0, 0],
        ...                   [1, 0, 0],
        ...                   [1, 1, 0],
        ...                   [0, 1, 0],
        ...                   [0, 0, 1],
        ...                   [1, 0, 1],
        ...                   [1, 1, 1],
        ...                   [0, 1, 1]])
        >>> cell2 = np.array([[0, 0, 2],
        ...                   [1, 0, 2],
        ...                   [1, 1, 2],
        ...                   [0, 1, 2],
        ...                   [0, 0, 3],
        ...                   [1, 0, 3],
        ...                   [1, 1, 3],
        ...                   [0, 1, 3]])
        >>> points = np.vstack((cell1, cell2))
        >>> grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)

        """
        # Convert to vtk arrays
        vtkcells = CellArray(cells, cell_type.size, deep)
        if cell_type.dtype != np.uint8:
            # VTK expects unsigned char cell types
            cell_type = cell_type.astype(np.uint8)
        cell_type_np = cell_type
        cell_type = _vtk.numpy_to_vtk(cell_type, deep=deep)

        # Convert points to vtkPoints object
        points = pyvista.vtk_points(points, deep=deep)
        self.SetPoints(points)

        # vtk9 does not require an offset array
        if _vtk.VTK9:
            if offset is not None:
                warnings.warn('VTK 9 no longer accepts an offset array',
                              stacklevel=3)
            self.SetCells(cell_type, vtkcells)
        else:
            if offset is None:
                # reconstruct per-cell start locations from the padded
                # cells array
                offset = generate_cell_offsets(cells, cell_type_np)
            self.SetCells(cell_type, numpy_to_idarr(offset), vtkcells)
def _check_for_consistency(self):
"""Check if size of offsets and celltypes match the number of cells.
Checks if the number of offsets and celltypes correspond to
the number of cells. Called after initialization of the self
from arrays.
"""
if self.n_cells != self.celltypes.size:
raise ValueError(f'Number of cell types ({self.celltypes.size}) '
f'must match the number of cells {self.n_cells})')
if _vtk.VTK9:
if self.n_cells != self.offset.size - 1:
raise ValueError(f'Size of the offset ({self.offset.size}) '
'must be one greater than the number of cells '
f'({self.n_cells})')
else:
if self.n_cells != self.offset.size:
raise ValueError(f'Size of the offset ({self.offset.size}) '
f'must match the number of cells ({self.n_cells})')
    @property
    def cells(self):
        """Legacy method: Return a pointer to the cells as a numpy object."""
        return _vtk.vtk_to_numpy(self.GetCells().GetData())

    @property
    def cells_dict(self):
        """Return a dictionary that contains all cells mapped from cell types.

        This function returns a np.ndarray for each cell type in an
        ordered fashion.  Note that this function only works with
        element types of fixed sizes.

        Returns
        -------
        cells_dict : dict
            A dictionary mapping containing all cells of this
            unstructured grid.
            Structure: vtk_enum_type (int) -> cells (np.ndarray)

        """
        return get_mixed_cells(self)

    @property
    def cell_connectivity(self):
        """Return the vtk cell connectivity as a numpy array.

        Requires VTK 9 or newer; on older VTK an AttributeError is raised.
        """
        carr = self.GetCells()
        if _vtk.VTK9:
            return _vtk.vtk_to_numpy(carr.GetConnectivityArray())
        raise AttributeError('Install vtk>=9.0.0 for `cell_connectivity`\n'
                             'Otherwise, use the legacy `cells` method')
    def linear_copy(self, deep=False):
        """Return a copy of the unstructured grid containing only linear cells.

        Converts the following cell types to their linear equivalents.

        - VTK_QUADRATIC_TETRA --> VTK_TETRA
        - VTK_QUADRATIC_PYRAMID --> VTK_PYRAMID
        - VTK_QUADRATIC_WEDGE --> VTK_WEDGE
        - VTK_QUADRATIC_HEXAHEDRON --> VTK_HEXAHEDRON

        Parameters
        ----------
        deep : bool
            When True, makes a copy of the points array.  Default
            False.  Cells and cell types are always copied.

        Returns
        -------
        grid : pyvista.UnstructuredGrid
            UnstructuredGrid containing only linear cells.
        """
        lgrid = self.copy(deep)
        # grab the vtk object
        vtk_cell_type = _vtk.numpy_to_vtk(self.GetCellTypesArray(), deep=True)
        celltype = _vtk.vtk_to_numpy(vtk_cell_type)
        # relabel quadratic volume cells as their linear counterparts;
        # the extra mid-edge nodes are simply ignored by the linear types
        celltype[celltype == _vtk.VTK_QUADRATIC_TETRA] = _vtk.VTK_TETRA
        celltype[celltype == _vtk.VTK_QUADRATIC_PYRAMID] = _vtk.VTK_PYRAMID
        celltype[celltype == _vtk.VTK_QUADRATIC_WEDGE] = _vtk.VTK_WEDGE
        celltype[celltype == _vtk.VTK_QUADRATIC_HEXAHEDRON] = _vtk.VTK_HEXAHEDRON
        # track quad mask for later
        quad_quad_mask = celltype == _vtk.VTK_QUADRATIC_QUAD
        celltype[quad_quad_mask] = _vtk.VTK_QUAD
        quad_tri_mask = celltype == _vtk.VTK_QUADRATIC_TRIANGLE
        celltype[quad_tri_mask] = _vtk.VTK_TRIANGLE
        vtk_offset = self.GetCellLocationsArray()
        cells = _vtk.vtkCellArray()
        cells.DeepCopy(self.GetCells())
        lgrid.SetCells(vtk_cell_type, vtk_offset, cells)
        # fixing bug with display of quad cells
        if np.any(quad_quad_mask):
            if _vtk.VTK9:
                # VTK 9 offsets index directly into the connectivity array;
                # collapse the 4 mid-edge nodes onto the first corner node
                quad_offset = lgrid.offset[:-1][quad_quad_mask]
                base_point = lgrid.cell_connectivity[quad_offset]
                lgrid.cell_connectivity[quad_offset + 4] = base_point
                lgrid.cell_connectivity[quad_offset + 5] = base_point
                lgrid.cell_connectivity[quad_offset + 6] = base_point
                lgrid.cell_connectivity[quad_offset + 7] = base_point
            else:
                # legacy layout stores [npoints, p0, p1, ...] per cell, so
                # the first corner is at offset + 1, mid-edge nodes at +5..+8
                quad_offset = lgrid.offset[quad_quad_mask]
                base_point = lgrid.cells[quad_offset + 1]
                lgrid.cells[quad_offset + 5] = base_point
                lgrid.cells[quad_offset + 6] = base_point
                lgrid.cells[quad_offset + 7] = base_point
                lgrid.cells[quad_offset + 8] = base_point
        if np.any(quad_tri_mask):
            if _vtk.VTK9:
                # same fix for quadratic triangles (3 mid-edge nodes)
                tri_offset = lgrid.offset[:-1][quad_tri_mask]
                base_point = lgrid.cell_connectivity[tri_offset]
                lgrid.cell_connectivity[tri_offset + 3] = base_point
                lgrid.cell_connectivity[tri_offset + 4] = base_point
                lgrid.cell_connectivity[tri_offset + 5] = base_point
            else:
                tri_offset = lgrid.offset[quad_tri_mask]
                base_point = lgrid.cells[tri_offset + 1]
                lgrid.cells[tri_offset + 4] = base_point
                lgrid.cells[tri_offset + 5] = base_point
                lgrid.cells[tri_offset + 6] = base_point
        return lgrid
@property
def celltypes(self):
"""Get the cell types array."""
return _vtk.vtk_to_numpy(self.GetCellTypesArray())
@property
def offset(self):
"""Get cell locations Array."""
carr = self.GetCells()
if _vtk.VTK9:
# This will be the number of cells + 1.
return _vtk.vtk_to_numpy(carr.GetOffsetsArray())
else: # this is no longer used in >= VTK9
return _vtk.vtk_to_numpy(self.GetCellLocationsArray())
    def cast_to_explicit_structured_grid(self):
        """Cast to an explicit structured grid.

        Returns
        -------
        ExplicitStructuredGrid
            An explicit structured grid.

        Raises
        ------
        AttributeError
            If VTK older than 9 is installed.
        TypeError
            If the unstructured grid doesn't have the ``'BLOCK_I'``,
            ``'BLOCK_J'`` and ``'BLOCK_K'`` cells arrays.

        See Also
        --------
        ExplicitStructuredGrid.cast_to_unstructured_grid :
            Cast an explicit structured grid to an unstructured grid.

        Examples
        --------
        >>> from pyvista import examples
        >>> grid = examples.load_explicit_structured() # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        >>> grid.hide_cells(range(80, 120)) # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        >>> grid = grid.cast_to_unstructured_grid() # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        >>> grid = grid.cast_to_explicit_structured_grid() # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        """
        if not _vtk.VTK9:
            raise AttributeError('VTK 9 or higher is required')
        # the BLOCK_I/J/K arrays encode the structured coordinates of
        # each cell and are required for the conversion
        s1 = {'BLOCK_I', 'BLOCK_J', 'BLOCK_K'}
        s2 = self.cell_arrays.keys()
        if not s1.issubset(s2):
            raise TypeError("'BLOCK_I', 'BLOCK_J' and 'BLOCK_K' cell arrays are required")
        alg = _vtk.vtkUnstructuredGridToExplicitStructuredGrid()
        alg.SetInputData(self)
        # tell the algorithm which cell arrays hold the I, J, K indices
        alg.SetInputArrayToProcess(0, 0, 0, 1, 'BLOCK_I')
        alg.SetInputArrayToProcess(1, 0, 0, 1, 'BLOCK_J')
        alg.SetInputArrayToProcess(2, 0, 0, 1, 'BLOCK_K')
        alg.Update()
        grid = _get_output(alg)
        grid.cell_arrays.remove('ConnectivityFlags')  # unrequired
        return grid
class StructuredGrid(_vtk.vtkStructuredGrid, PointGrid, StructuredGridFilters):
    """Extend the functionality of a vtk.vtkStructuredGrid object.

    Can be initialized in several ways:

    - Create empty grid
    - Initialize from a vtk.vtkStructuredGrid object
    - Initialize directly from the point arrays

    See ``_from_arrays`` in the documentation for more details on
    initializing from point arrays.

    Examples
    --------
    >>> import pyvista
    >>> import vtk
    >>> import numpy as np

    Create empty grid

    >>> grid = pyvista.StructuredGrid()

    Initialize from a vtk.vtkStructuredGrid object

    >>> vtkgrid = vtk.vtkStructuredGrid()
    >>> grid = pyvista.StructuredGrid(vtkgrid)

    Create from NumPy arrays

    >>> xrng = np.arange(-10, 10, 2)
    >>> yrng = np.arange(-10, 10, 2)
    >>> zrng = np.arange(-10, 10, 2)
    >>> x, y, z = np.meshgrid(xrng, yrng, zrng)
    >>> grid = pyvista.StructuredGrid(x, y, z)

    """

    _WRITERS = {'.vtk': _vtk.vtkStructuredGridWriter,
                '.vts': _vtk.vtkXMLStructuredGridWriter}

    def __init__(self, *args, **kwargs) -> None:
        """Initialize the structured grid."""
        super().__init__()
        if len(args) == 1:
            if isinstance(args[0], _vtk.vtkStructuredGrid):
                self.deep_copy(args[0])
            elif isinstance(args[0], (str, pathlib.Path)):
                self._from_file(args[0], **kwargs)
        elif len(args) == 3:
            # all three arguments must be coordinate arrays
            arg0_is_arr = isinstance(args[0], np.ndarray)
            arg1_is_arr = isinstance(args[1], np.ndarray)
            arg2_is_arr = isinstance(args[2], np.ndarray)
            if all([arg0_is_arr, arg1_is_arr, arg2_is_arr]):
                self._from_arrays(args[0], args[1], args[2])

    def __repr__(self):
        """Return the standard representation."""
        return DataSet.__repr__(self)

    def __str__(self):
        """Return the standard str representation."""
        return DataSet.__str__(self)

    def _from_arrays(self, x, y, z):
        """Create VTK structured grid directly from numpy arrays.

        Parameters
        ----------
        x : np.ndarray
            Position of the points in x direction.
        y : np.ndarray
            Position of the points in y direction.
        z : np.ndarray
            Position of the points in z direction.

        Raises
        ------
        ValueError
            If the three input arrays do not share the same shape.
        """
        if not (x.shape == y.shape == z.shape):
            raise ValueError('Input point array shapes must match exactly')
        # make the output points the same precision as the input arrays
        points = np.empty((x.size, 3), x.dtype)
        points[:, 0] = x.ravel('F')
        points[:, 1] = y.ravel('F')
        points[:, 2] = z.ravel('F')
        # ensure that the inputs are 3D
        dim = list(x.shape)
        while len(dim) < 3:
            dim.append(1)
        # Create structured grid
        self.SetDimensions(dim)
        self.SetPoints(pyvista.vtk_points(points))

    @property
    def dimensions(self):
        """Return a length 3 list of the grid's dimensions."""
        return list(self.GetDimensions())

    @dimensions.setter
    def dimensions(self, dims):
        """Set the dataset dimensions. Pass a length three tuple of integers."""
        nx, ny, nz = dims[0], dims[1], dims[2]
        self.SetDimensions(nx, ny, nz)
        self.Modified()

    @property
    def x(self):
        """Return the X coordinates of all points."""
        return self._reshape_point_array(self.points[:, 0])

    @property
    def y(self):
        """Return the Y coordinates of all points."""
        return self._reshape_point_array(self.points[:, 1])

    @property
    def z(self):
        """Return the Z coordinates of all points."""
        return self._reshape_point_array(self.points[:, 2])

    @property
    def points_matrix(self):
        """Points as a 4-D matrix, with x/y/z along the last dimension."""
        return self.points.reshape((*self.dimensions, 3), order='F')

    def _get_attrs(self):
        """Return the representation methods (internal helper)."""
        attrs = PointGrid._get_attrs(self)
        attrs.append(("Dimensions", self.dimensions, "{:d}, {:d}, {:d}"))
        return attrs

    def __getitem__(self, key):
        """Slice subsets of the StructuredGrid, or extract an array field."""
        # legacy behavior which looks for a point or cell array
        if not isinstance(key, tuple):
            return super().__getitem__(key)

        # ``collections.Iterable`` was deprecated since Python 3.3 and
        # removed in Python 3.10; use the ``collections.abc`` location.
        from collections.abc import Iterable

        # convert slice to VOI specification - only "basic indexing" is supported
        voi = []
        rate = []
        if len(key) != 3:
            raise RuntimeError('Slices must have exactly 3 dimensions.')
        for i, k in enumerate(key):
            if isinstance(k, Iterable):
                raise RuntimeError('Fancy indexing is not supported.')
            if isinstance(k, numbers.Integral):
                start = stop = k
                step = 1
            elif isinstance(k, slice):
                start = k.start if k.start is not None else 0
                stop = k.stop - 1 if k.stop is not None else self.dimensions[i]
                step = k.step if k.step is not None else 1
            else:
                # previously this fell through and silently reused the
                # start/stop/step of the prior axis (or raised NameError
                # on the first axis); fail loudly instead
                raise RuntimeError(f'Unsupported index type: {type(k)}')
            voi.extend((start, stop))
            rate.append(step)
        return self.extract_subset(voi, rate, boundary=False)

    def hide_cells(self, ind):
        """Hide cells without deleting them.

        Hides cells by setting the ghost_cells array to HIDDEN_CELL.

        Parameters
        ----------
        ind : iterable
            List or array of cell indices to be hidden.  The array can
            also be a boolean array of the same size as the number of
            cells.

        Examples
        --------
        Hide part of the middle of a structured surface.

        >>> import pyvista as pv
        >>> import numpy as np
        >>> x = np.arange(-10, 10, 0.25)
        >>> y = np.arange(-10, 10, 0.25)
        >>> z = 0
        >>> x, y, z = np.meshgrid(x, y, z)
        >>> grid = pv.StructuredGrid(x, y, z)
        >>> grid.hide_cells(range(79*30, 79*50))
        """
        if isinstance(ind, np.ndarray):
            if ind.dtype == np.bool_ and ind.size != self.n_cells:
                raise ValueError('Boolean array size must match the '
                                 f'number of cells ({self.n_cells})')
        ghost_cells = np.zeros(self.n_cells, np.uint8)
        ghost_cells[ind] = _vtk.vtkDataSetAttributes.HIDDENCELL
        # NOTE: cells cannot be removed from a structured grid, only
        # hidden.  Setting ghost_cells to a value besides
        # vtk.vtkDataSetAttributes.HIDDENCELL will not hide them
        # properly; additionally, calling self.RemoveGhostCells will
        # have no effect.
        self.cell_arrays[_vtk.vtkDataSetAttributes.GhostArrayName()] = ghost_cells

    def _reshape_point_array(self, array):
        """Reshape point data to a 3-D matrix."""
        return array.reshape(self.dimensions, order='F')

    def _reshape_cell_array(self, array):
        """Reshape cell data to a 3-D matrix."""
        cell_dims = np.array(self.dimensions) - 1
        # flat (2D) grids have a zero cell dimension; treat it as one
        cell_dims[cell_dims == 0] = 1
        return array.reshape(cell_dims, order='F')
class ExplicitStructuredGrid(_vtk.vtkExplicitStructuredGrid, PointGrid):
    """Extend the functionality of a ``vtk.vtkExplicitStructuredGrid`` object.

    Can be initialized by the following:

    - Creating an empty grid
    - From a ``vtk.vtkExplicitStructuredGrid`` or ``vtk.vtkUnstructuredGrid`` object
    - From a VTU or VTK file
    - From ``dims`` and ``corners`` arrays

    Examples
    --------
    >>> import numpy as np
    >>> import pyvista as pv
    >>>
    >>> # grid size: ni*nj*nk cells; si, sj, sk steps
    >>> ni, nj, nk = 4, 5, 6
    >>> si, sj, sk = 20, 10, 1
    >>>
    >>> # create raw coordinate grid
    >>> grid_ijk = np.mgrid[:(ni+1)*si:si, :(nj+1)*sj:sj, :(nk+1)*sk:sk]
    >>>
    >>> # repeat array along each Cartesian axis for connectivity
    >>> for axis in range(1, 4):
    ...     grid_ijk = grid_ijk.repeat(2, axis=axis)
    >>>
    >>> # slice off unnecessarily doubled edge coordinates
    >>> grid_ijk = grid_ijk[:, 1:-1, 1:-1, 1:-1]
    >>>
    >>> # reorder and reshape to VTK order
    >>> corners = grid_ijk.transpose().reshape(-1, 3)
    >>>
    >>> dims = np.array([ni, nj, nk]) + 1
    >>> grid = pv.ExplicitStructuredGrid(dims, corners)
    >>> _ = grid.compute_connectivity()
    >>> grid.plot(show_edges=True) # doctest: +SKIP

    """

    _WRITERS = {'.vtu': _vtk.vtkXMLUnstructuredGridWriter,
                '.vtk': _vtk.vtkUnstructuredGridWriter}

    def __init__(self, *args, **kwargs):
        """Initialize the explicit structured grid."""
        if not _vtk.VTK9:
            raise AttributeError('VTK 9 or higher is required')
        super().__init__()
        n = len(args)
        if n == 1:
            arg0 = args[0]
            if isinstance(arg0, _vtk.vtkExplicitStructuredGrid):
                self.deep_copy(arg0)
            elif isinstance(arg0, _vtk.vtkUnstructuredGrid):
                # requires the BLOCK_I/J/K cell arrays on the input grid
                grid = arg0.cast_to_explicit_structured_grid()
                self.deep_copy(grid)
            elif isinstance(arg0, (str, pathlib.Path)):
                # read the file as an unstructured grid, then cast
                grid = UnstructuredGrid(arg0)
                grid = grid.cast_to_explicit_structured_grid()
                self.deep_copy(grid)
        elif n == 2:
            # (dims, corners) pair; normalize sequences to numpy arrays
            arg0, arg1 = args
            if isinstance(arg0, tuple):
                arg0 = np.asarray(arg0)
            if isinstance(arg1, list):
                arg1 = np.asarray(arg1)
            arg0_is_arr = isinstance(arg0, np.ndarray)
            arg1_is_arr = isinstance(arg1, np.ndarray)
            if all([arg0_is_arr, arg1_is_arr]):
                self._from_arrays(arg0, arg1)

    def __repr__(self):
        """Return the standard representation."""
        return DataSet.__repr__(self)

    def __str__(self):
        """Return the standard ``str`` representation."""
        return DataSet.__str__(self)
def _from_arrays(self, dims, corners):
"""Create a VTK explicit structured grid from NumPy arrays.
Parameters
----------
dims : numpy.ndarray
An array of integers with shape (3,) containing the
topological dimensions of the grid.
corners : numpy.ndarray
An array of floats with shape (number of corners, 3)
containing the coordinates of the corner points.
"""
shape0 = dims-1
shape1 = 2*shape0
ncells = np.prod(shape0)
cells = 8*np.ones((ncells, 9), dtype=int)
points, indices = np.unique(corners, axis=0, return_inverse=True)
connectivity = np.asarray([[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1]])
for c in range(ncells):
i, j, k = np.unravel_index(c, shape0, order='F')
coord = (2*i + connectivity[0],
2*j + connectivity[1],
2*k + connectivity[2])
cinds = np.ravel_multi_index(coord, shape1, order='F')
cells[c, 1:] = indices[cinds]
cells = cells.flatten()
points = pyvista.vtk_points(points)
cells = CellArray(cells, ncells)
self.SetDimensions(dims)
self.SetPoints(points)
self.SetCells(cells)
    def cast_to_unstructured_grid(self):
        """Cast to an unstructured grid.

        Returns
        -------
        UnstructuredGrid
            An unstructured grid. VTK adds the ``'BLOCK_I'``,
            ``'BLOCK_J'`` and ``'BLOCK_K'`` cell arrays. These arrays
            are required to restore the explicit structured grid.

        Warnings
        --------
        The ghost cell array is disabled before casting the
        unstructured grid in order to allow the original structure
        and attributes data of the explicit structured grid to be
        restored. If you don't need to restore the explicit
        structured grid later or want to extract an unstructured
        grid from the visible subgrid, use the ``extract_cells``
        filter and the cell indices where the ghost cell array is
        ``0``.

        See Also
        --------
        DataSetFilters.extract_cells :
            Extract a subset of a dataset.
        UnstructuredGrid.cast_to_explicit_structured_grid :
            Cast an unstructured grid to an explicit structured grid.

        Examples
        --------
        >>> from pyvista import examples
        >>> grid = examples.load_explicit_structured() # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        >>> grid.hide_cells(range(80, 120)) # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        >>> grid = grid.cast_to_unstructured_grid() # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        >>> grid = grid.cast_to_explicit_structured_grid() # doctest: +SKIP
        >>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
        """
        # copy only the structure (not the attributes) so the ghost cell
        # array does not influence the conversion
        grid = ExplicitStructuredGrid()
        grid.copy_structure(self)
        alg = _vtk.vtkExplicitStructuredGridToUnstructuredGrid()
        alg.SetInputDataObject(grid)
        alg.Update()
        grid = _get_output(alg)
        grid.cell_arrays.remove('vtkOriginalCellIds')  # unrequired
        grid.copy_attributes(self)  # copy ghost cell array and other arrays
        return grid
def save(self, filename, binary=True):
"""Save this VTK object to file.
Parameters
----------
filename : str
Output file name. VTU and VTK extensions are supported.
binary : bool, optional
If ``True`` (default), write as binary, else ASCII.
Warnings
--------
VTK adds the ``'BLOCK_I'``, ``'BLOCK_J'`` and ``'BLOCK_K'``
cell arrays. These arrays are required to restore the explicit
structured grid.
Examples
--------
>>> import pyvista as pv
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.hide_cells(range(80, 120)) # doctest: +SKIP
>>> grid.save('grid.vtu') # doctest: +SKIP
>>> grid = pv.ExplicitStructuredGrid('grid.vtu') # doctest: +SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
>>> grid.show_cells() # doctest: +SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
"""
grid = self.cast_to_unstructured_grid()
grid.save(filename, binary)
def hide_cells(self, ind, inplace=True):
"""Hide specific cells.
Hides cells by setting the ghost cell array to ``HIDDENCELL``.
Parameters
----------
ind : int or iterable(int)
Cell indices to be hidden. A boolean array of the same
size as the number of cells also is acceptable.
inplace : bool, optional
This method is applied to this grid if ``True`` (default)
or to a copy otherwise.
Returns
-------
grid : ExplicitStructuredGrid or None
A deep copy of this grid if ``inplace=False`` or ``None`` otherwise.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.hide_cells(range(80, 120)) # doctest: +SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
"""
if inplace:
ind = np.asarray(ind)
array = np.zeros(self.n_cells, dtype=np.uint8)
array[ind] = _vtk.vtkDataSetAttributes.HIDDENCELL
name = _vtk.vtkDataSetAttributes.GhostArrayName()
self.cell_arrays[name] = array
return self
else:
grid = self.copy()
grid.hide_cells(ind)
return grid
def show_cells(self, inplace=True):
"""Show hidden cells.
Shows hidden cells by setting the ghost cell array to ``0``
where ``HIDDENCELL``.
Parameters
----------
inplace : bool, optional
This method is applied to this grid if ``True`` (default)
or to a copy otherwise.
Returns
-------
grid : ExplicitStructuredGrid
A deep copy of this grid if ``inplace=False`` with the
hidden cells shown. Otherwise, this dataset with the
shown cells.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.hide_cells(range(80, 120)) # doctest: +SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
>>> grid.show_cells() # doctest: +SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest: +SKIP
"""
if inplace:
name = _vtk.vtkDataSetAttributes.GhostArrayName()
if name in self.cell_arrays.keys():
array = self.cell_arrays[name]
ind = np.argwhere(array == _vtk.vtkDataSetAttributes.HIDDENCELL)
array[ind] = 0
return self
else:
grid = self.copy()
grid.show_cells()
return grid
def _dimensions(self):
# This method is required to avoid conflict if a developer extends `ExplicitStructuredGrid`
# and reimplements `dimensions` to return, for example, the number of cells in the I, J and
# K directions.
dims = self.extent
dims = np.reshape(dims, (3, 2))
dims = np.diff(dims, axis=1)
dims = dims.flatten()
return dims+1
    @property
    def dimensions(self):
        """Return the topological dimensions of the grid.

        Returns
        -------
        tuple(int)
            Number of sampling points in the I, J and K directions respectively.

        Examples
        --------
        >>> from pyvista import examples
        >>> grid = examples.load_explicit_structured() # doctest: +SKIP
        >>> grid.dimensions # doctest: +SKIP
        array([5, 6, 7])
        """
        return self._dimensions()
@property
def visible_bounds(self):
"""Return the bounding box of the visible cells.
Different from `bounds`, which returns the bounding box of the
complete grid, this method returns the bounding box of the
visible cells, where the ghost cell array is not
``HIDDENCELL``.
Returns
-------
list(float)
The limits of the visible grid in the X, Y and Z
directions respectively.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.hide_cells(range(80, 120)) # doctest: +SKIP
>>> grid.bounds # doctest: +SKIP
[0.0, 80.0, 0.0, 50.0, 0.0, 6.0]
>>> grid.visible_bounds # doctest: +SKIP
[0.0, 80.0, 0.0, 50.0, 0.0, 4.0]
"""
name = _vtk.vtkDataSetAttributes.GhostArrayName()
if name in self.cell_arrays:
array = self.cell_arrays[name]
grid = self.extract_cells(array == 0)
return grid.bounds
else:
return self.bounds
def cell_id(self, coords):
"""Return the cell ID.
Parameters
----------
coords : tuple(int), list(tuple(int)) or numpy.ndarray
Cell structured coordinates.
Returns
-------
ind : int, numpy.ndarray or None
Cell IDs. ``None`` if ``coords`` is outside the grid extent.
See Also
--------
ExplicitStructuredGrid.cell_coords :
Return the cell structured coordinates.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.cell_id((3, 4, 0)) # doctest: +SKIP
19
>>> coords = [(3, 4, 0),
... (3, 2, 1),
... (1, 0, 2),
... (2, 3, 2)]
>>> grid.cell_id(coords) # doctest: +SKIP
array([19, 31, 41, 54])
"""
# `vtk.vtkExplicitStructuredGrid.ComputeCellId` is not used
# here because this method returns invalid cell IDs when
# `coords` is outside the grid extent.
if isinstance(coords, list):
coords = np.asarray(coords)
if isinstance(coords, np.ndarray) and coords.ndim == 2:
ncol = coords.shape[1]
coords = [coords[:, c] for c in range(ncol)]
coords = tuple(coords)
dims = self._dimensions()
try:
ind = np.ravel_multi_index(coords, dims-1, order='F')
except ValueError:
return None
else:
return ind
def cell_coords(self, ind):
"""Return the cell structured coordinates.
Parameters
----------
ind : int or iterable(int)
Cell IDs.
Returns
-------
coords : tuple(int), numpy.ndarray or None
Cell structured coordinates. ``None`` if ``ind`` is
outside the grid extent.
See Also
--------
ExplicitStructuredGrid.cell_id :
Return the cell ID.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.cell_coords(19) # doctest: +SKIP
(3, 4, 0)
>>> grid.cell_coords((19, 31, 41, 54)) # doctest: +SKIP
array([[3, 4, 0],
[3, 2, 1],
[1, 0, 2],
[2, 3, 2]])
"""
dims = self._dimensions()
try:
coords = np.unravel_index(ind, dims-1, order='F')
except ValueError:
return None
else:
if isinstance(coords[0], np.ndarray):
coords = np.stack(coords, axis=1)
return coords
def neighbors(self, ind, rel='connectivity'):
"""Return the indices of neighboring cells.
Parameters
----------
ind : int or iterable(int)
Cell IDs.
rel : str, optional
Defines the neighborhood relationship. If
``'topological'``, returns the ``(i-1, j, k)``, ``(i+1, j,
k)``, ``(i, j-1, k)``, ``(i, j+1, k)``, ``(i, j, k-1)``
and ``(i, j, k+1)`` cells. If ``'connectivity'``
(default), returns only the topological neighbors
considering faces connectivity. If ``'geometric'``,
returns the cells in the ``(i-1, j)``, ``(i+1, j)``,
``(i,j-1)`` and ``(i, j+1)`` vertical cell groups whose
faces intersect.
Returns
-------
indices : list(int)
Indices of neighboring cells.
Examples
--------
>>> import pyvista as pv
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> cell = grid.extract_cells(31) # doctest: +SKIP
>>> ind = grid.neighbors(31) # doctest: +SKIP
>>> neighbors = grid.extract_cells(ind) # doctest: +SKIP
>>>
>>> plotter = pv.Plotter()
>>> plotter.add_axes() # doctest: +SKIP
>>> plotter.add_mesh(cell, color='r', show_edges=True) # doctest: +SKIP
>>> plotter.add_mesh(neighbors, color='w', show_edges=True) # doctest: +SKIP
>>> plotter.show() # doctest: +SKIP
"""
def connectivity(ind):
indices = []
cell_coords = self.cell_coords(ind)
cell_points = self.cell_points(ind)
if cell_points.shape[0] == 8:
faces = [[(-1, 0, 0), (0, 4, 7, 3), (1, 5, 6, 2)],
[(+1, 0, 0), (1, 2, 6, 5), (0, 3, 7, 4)],
[(0, -1, 0), (0, 1, 5, 4), (3, 2, 6, 7)],
[(0, +1, 0), (3, 7, 6, 2), (0, 4, 5, 1)],
[(0, 0, -1), (0, 3, 2, 1), (4, 7, 6, 5)],
[(0, 0, +1), (4, 5, 6, 7), (0, 1, 2, 3)]]
for f in faces:
coords = np.sum([cell_coords, f[0]], axis=0)
ind = self.cell_id(coords)
if ind:
points = self.cell_points(ind)
if points.shape[0] == 8:
a1 = cell_points[f[1], :]
a2 = points[f[2], :]
if np.array_equal(a1, a2):
indices.append(ind)
return indices
def topological(ind):
indices = []
cell_coords = self.cell_coords(ind)
cell_neighbors = [(-1, 0, 0), (1, 0, 0),
(0, -1, 0), (0, 1, 0),
(0, 0, -1), (0, 0, 1)]
for n in cell_neighbors:
coords = np.sum([cell_coords, n], axis=0)
ind = self.cell_id(coords)
if ind:
indices.append(ind)
return indices
def geometric(ind):
indices = []
cell_coords = self.cell_coords(ind)
cell_points = self.cell_points(ind)
if cell_points.shape[0] == 8:
for k in [-1, 1]:
coords = np.sum([cell_coords, (0, 0, k)], axis=0)
ind = self.cell_id(coords)
if ind:
indices.append(ind)
faces = [[(-1, 0, 0), (0, 4, 3, 7), (1, 5, 2, 6)],
[(+1, 0, 0), (2, 6, 1, 5), (3, 7, 0, 4)],
[(0, -1, 0), (1, 5, 0, 4), (2, 6, 3, 7)],
[(0, +1, 0), (3, 7, 2, 6), (0, 4, 1, 5)]]
nk = self.dimensions[2]
for f in faces:
cell_z = cell_points[f[1], 2]
cell_z = np.abs(cell_z)
cell_z = cell_z.reshape((2, 2))
cell_zmin = cell_z.min(axis=1)
cell_zmax = cell_z.max(axis=1)
coords = np.sum([cell_coords, f[0]], axis=0)
for k in range(nk):
coords[2] = k
ind = self.cell_id(coords)
if ind:
points = self.cell_points(ind)
if points.shape[0] == 8:
z = points[f[2], 2]
z = np.abs(z)
z = z.reshape((2, 2))
zmin = z.min(axis=1)
zmax = z.max(axis=1)
if ((zmax[0] > cell_zmin[0] and zmin[0] < cell_zmax[0]) or
(zmax[1] > cell_zmin[1] and zmin[1] < cell_zmax[1]) or
(zmin[0] > cell_zmax[0] and zmax[1] < cell_zmin[1]) or
(zmin[1] > cell_zmax[1] and zmax[0] < cell_zmin[0])):
indices.append(ind)
return indices
if isinstance(ind, int):
ind = [ind]
rel = eval(rel)
indices = set()
for i in ind:
indices.update(rel(i))
return sorted(indices)
def compute_connectivity(self, inplace=True):
"""Compute the faces connectivity flags array.
This method checks the faces connectivity of the cells with
their topological neighbors. The result is stored in the
array of integers ``'ConnectivityFlags'``. Each value in this
array must be interpreted as a binary number, where the digits
shows the faces connectivity of a cell with its topological
neighbors -Z, +Z, -Y, +Y, -X and +X respectively. For example,
a cell with ``'ConnectivityFlags'`` equal to ``27``
(``011011``) indicates that this cell is connected by faces
with their neighbors ``(0, 0, 1)``, ``(0, -1, 0)``,
``(-1, 0, 0)`` and ``(1, 0, 0)``.
Parameters
----------
inplace : bool, optional
This method is applied to this grid if ``True`` (default)
or to a copy otherwise.
Returns
-------
grid : ExplicitStructuredGrid
A deep copy of this grid if ``inplace=False``.
See Also
--------
ExplicitStructuredGrid.compute_connections :
Compute an array with the number of connected cell faces.
Examples
--------
>>> from pyvista import examples
>>>
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.compute_connectivity() # doctest: +SKIP
>>> grid.plot(show_edges=True) # doctest: +SKIP
"""
if inplace:
self.ComputeFacesConnectivityFlagsArray()
return self
else:
grid = self.copy()
grid.compute_connectivity()
return grid
def compute_connections(self, inplace=True):
"""Compute an array with the number of connected cell faces.
This method calculates the number of topological cell
neighbors connected by faces. The results are stored in the
``'number_of_connections'`` cell array.
Parameters
----------
inplace : bool, optional
This method is applied to this grid if ``True`` (default)
or to a copy otherwise.
Returns
-------
grid : ExplicitStructuredGrid or None
A deep copy of this grid if ``inplace=False`` or ``None`` otherwise.
See Also
--------
ExplicitStructuredGrid.compute_connectivity :
Compute the faces connectivity flags array.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest: +SKIP
>>> grid.compute_connections() # doctest: +SKIP
>>> grid.plot(show_edges=True) # doctest: +SKIP
"""
if inplace:
if 'ConnectivityFlags' in self.cell_arrays:
array = self.cell_arrays['ConnectivityFlags']
else:
grid = self.compute_connectivity(inplace=False)
array = grid.cell_arrays['ConnectivityFlags']
array = array.reshape((-1, 1))
array = array.astype(np.uint8)
array = np.unpackbits(array, axis=1)
array = array.sum(axis=1)
self.cell_arrays['number_of_connections'] = array
return self
else:
grid = self.copy()
grid.compute_connections()
return grid
|
akaszynski/vtkInterface
|
pyvista/core/pointset.py
|
Python
|
mit
| 62,814
|
[
"Gaussian",
"VTK"
] |
4683799dfde5e9b5770508a36c794402ddeee2323f9f5f4c4b99f80c9bdb5056
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Wrapper script around Rietveld's upload.py that simplifies working with groups
of files.
"""
import json
import optparse
import os
import random
import re
import string
import sys
import tempfile
import time
import urllib2
import breakpad # pylint: disable=W0611
import fix_encoding
import gclient_utils
import presubmit_support
import rietveld
from scm import SVN
import subprocess2
from third_party import upload
__version__ = '1.2.1'
CODEREVIEW_SETTINGS = {
  # To make gcl send reviews to a server, check in a file named
  # "codereview.settings" (see |CODEREVIEW_SETTINGS_FILE| below) to your
  # project's base directory and add the following line to codereview.settings:
  # CODE_REVIEW_SERVER: codereview.yourserver.org
}
# globals that store the root of the current repository and the directory where
# we store information about changelists.
REPOSITORY_ROOT = ""
# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
CODEREVIEW_SETTINGS_FILE_NOT_FOUND = (
    'No %s file found. Please add one.' % CODEREVIEW_SETTINGS_FILE)
# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
# Global cache of files cached in GetCacheDir().
FILES_CACHE = {}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
# "$^" can never match, so by default no lint findings are ignored.
DEFAULT_LINT_IGNORE_REGEX = r"$^"
# Matches "R=reviewer1,reviewer2" lines in a change description.
REVIEWERS_REGEX = r'\s*R=(.+)'
def CheckHomeForFile(filename):
  """Checks the users home dir for the existence of the given file.

  Returns the path to the file if it's there, or None if it is not.
  """
  home_vars = ['HOME']
  if sys.platform in ('cygwin', 'win32'):
    # Windows keeps the home directory in USERPROFILE.
    home_vars.append('USERPROFILE')
  for home_var in home_vars:
    home = os.getenv(home_var)
    # PEP 8: compare against None with ``is not``, not ``!=``.
    if home is not None:
      full_path = os.path.join(home, filename)
      if os.path.exists(full_path):
        return full_path
  return None
def UnknownFiles():
  """Runs svn status and returns unknown files."""
  unknown = []
  for item in SVN.CaptureStatus([], GetRepositoryRoot()):
    # the first character of the status attributes is '?' for unknown files
    if item[0][0] == '?':
      unknown.append(item[1])
  return unknown
def GetRepositoryRoot():
  """Returns the top level directory of the current repository.

  The directory is returned as an absolute path.
  """
  global REPOSITORY_ROOT
  if REPOSITORY_ROOT:
    # already resolved once; reuse the cached value
    return REPOSITORY_ROOT
  root = SVN.GetCheckoutRoot(os.getcwd())
  if not root:
    raise gclient_utils.Error("gcl run outside of repository")
  REPOSITORY_ROOT = root
  return REPOSITORY_ROOT
def GetInfoDir():
  """Returns the directory where gcl info files are stored."""
  root = GetRepositoryRoot()
  return os.path.join(root, '.svn', 'gcl_info')
def GetChangesDir():
  """Returns the directory where gcl change files are stored."""
  info_dir = GetInfoDir()
  return os.path.join(info_dir, 'changes')
def GetCacheDir():
  """Returns the directory where gcl caches files retrieved from the repo."""
  return os.path.join(GetInfoDir(), 'cache')
def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
  """Retrieves a file from the repository and caches it in GetCacheDir() for
  max_age seconds.

  Args:
    filename: repository-relative name of the file to fetch via 'svn cat'.
    max_age: maximum age in seconds of the on-disk cached copy before a
        refetch (default: 3 days).
    use_root: If False, look up the arborescence for the first match, otherwise go
        directory to the root repository.

  Note: The cache will be inconsistent if the same file is retrieved with both
  use_root=True and use_root=False. Don't be stupid.
  """
  if filename not in FILES_CACHE:
    # Don't try to look up twice.
    FILES_CACHE[filename] = None
    # First we check if we have a cached version.
    try:
      cached_file = os.path.join(GetCacheDir(), filename)
    except (gclient_utils.Error, subprocess2.CalledProcessError):
      # Not inside a valid checkout; nothing to cache against.
      return None
    if (not os.path.exists(cached_file) or
        (time.time() - os.stat(cached_file).st_mtime) > max_age):
      dir_info = SVN.CaptureLocalInfo([], '.')
      repo_root = dir_info['Repository Root']
      if use_root:
        url_path = repo_root
      else:
        url_path = dir_info['URL']
      while True:
        # Look in the repository at the current level for the file.
        # Retry up to 5 times to ride over transient svn failures.
        for _ in range(5):
          content = None
          try:
            # Take advantage of the fact that svn won't output to stderr in case
            # of success but will do in case of failure so don't mind putting
            # stderr into content_array.
            content_array = []
            svn_path = url_path + '/' + filename
            args = ['svn', 'cat', svn_path]
            if sys.platform != 'darwin':
              # MacOSX 10.5.2 has a bug with svn 1.4.4 that will trigger the
              # 'Can\'t get username or password' and can be fixed easily.
              # The fix doesn't work if the user upgraded to svn 1.6.x. Bleh.
              # I don't have time to fix their broken stuff.
              args.append('--non-interactive')
            gclient_utils.CheckCallAndFilter(
                args, cwd='.', filter_fn=content_array.append)
            # Exit the loop if the file was found. Override content.
            content = '\n'.join(content_array)
            break
          except (gclient_utils.Error, subprocess2.CalledProcessError):
            # NOTE(review): assumes content_array is non-empty on failure
            # (svn prints an error line before exiting non-zero) — an empty
            # array would raise IndexError here; confirm with svn behavior.
            if content_array[0].startswith(
                'svn: Can\'t get username or password'):
              ErrorExit('Your svn credentials expired. Please run svn update '
                        'to fix the cached credentials')
            if content_array[0].startswith('svn: Can\'t get password'):
              ErrorExit('If are using a Mac and svn --version shows 1.4.x, '
                        'please hack gcl.py to remove --non-interactive usage, it\'s'
                        'a bug on your installed copy')
            if (content_array[0].startswith('svn: File not found:') or
                content_array[0].endswith('path not found')):
              break
            # Otherwise, fall through to trying again.
        if content:
          break
        if url_path == repo_root:
          # Reached the root. Abandoning search.
          break
        # Go up one level to try again.
        url_path = os.path.dirname(url_path)
      if content is not None or filename != CODEREVIEW_SETTINGS_FILE:
        # Write a cached version even if there isn't a file, so we don't try to
        # fetch it each time. codereview.settings must always be present so do
        # not cache negative.
        gclient_utils.FileWrite(cached_file, content or '')
    else:
      content = gclient_utils.FileRead(cached_file, 'r')
    # Keep the content cached in memory.
    FILES_CACHE[filename] = content
  return FILES_CACHE[filename]
def GetCodeReviewSetting(key):
  """Returns a value for the given key for this repository.

  Lazily populates CODEREVIEW_SETTINGS from codereview.settings on the first
  call; returns "" when the key is absent.
  """
  # '__just_initialized' is a sentinel key marking that the settings dict has
  # already been populated.
  if '__just_initialized' not in CODEREVIEW_SETTINGS:
    settings_content = GetCachedFile(CODEREVIEW_SETTINGS_FILE)
    if settings_content:
      parsed = gclient_utils.ParseCodereviewSettingsContent(settings_content)
      CODEREVIEW_SETTINGS.update(parsed)
    CODEREVIEW_SETTINGS.setdefault('__just_initialized', None)
  return CODEREVIEW_SETTINGS.get(key, "")
def Warn(msg):
  """Prints a warning message to stderr."""
  sys.stderr.write('%s\n' % (msg,))
def ErrorExit(msg):
  """Prints msg to stderr and aborts the program with exit code 1."""
  sys.stderr.write('%s\n' % (msg,))
  sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
  """Executes a command and returns the output and the return code.

  Args:
    command: the command to run, as a list; executed with the repository root
        as the working directory.
    print_output: when True, echo each output line to stdout as it arrives.

  Returns:
    A (output, returncode) tuple; output contains stdout and stderr
    interleaved (stderr is redirected to stdout).
  """
  p = subprocess2.Popen(
      command,
      cwd=GetRepositoryRoot(),
      stdout=subprocess2.PIPE,
      stderr=subprocess2.STDOUT,
      universal_newlines=True)
  if print_output:
    # Stream line by line so the user sees progress as it happens.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      if print_output:
        print line.strip('\n')
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  p.stdout.close()
  return output, p.returncode
def RunShell(command, print_output=False):
  """Executes a command and returns the output, discarding the return code."""
  output, _ = RunShellWithReturnCode(command, print_output)
  return output
def FilterFlag(args, flag):
  """Returns True if the flag is present in args list.

  The first occurrence of the flag is removed from args (mutated in place).
  """
  try:
    args.remove(flag)
  except ValueError:
    return False
  return True
class ChangeInfo(object):
  """Holds information about a changelist.

  name: change name.
  issue: the Rietveld issue number or 0 if it hasn't been uploaded yet.
  patchset: the Rietveld latest patchset number or 0.
  description: the description.
  files: a list of 2 tuple containing (status, filename) of changed files,
         with paths being relative to the top repository directory.
  local_root: Local root directory
  rietveld: rietveld server for this change
  """
  # Kept for unit test support. This is for the old format, it's deprecated.
  SEPARATOR = "\n-----\n"

  def __init__(self, name, issue, patchset, description, files, local_root,
               rietveld_url, needs_upload):
    self.name = name
    self.issue = int(issue)
    self.patchset = int(patchset)
    # _description and _reviewers are derived together by _set_description().
    self._description = None
    self._reviewers = None
    self._set_description(description)
    if files is None:
      files = []
    self._files = files
    # Cached diff of the change; lazily generated (see _HasNewMethod).
    self.patch = None
    self._local_root = local_root
    self.needs_upload = needs_upload
    # Fall back to the repository's configured server when no URL was given.
    self.rietveld = gclient_utils.UpgradeToHttps(
        rietveld_url or GetCodeReviewSetting('CODE_REVIEW_SERVER'))
    self._rpc_server = None

  def _get_description(self):
    # Getter half of the 'description' property.
    return self._description

  def _set_description(self, description):
    # Setter half of the 'description' property: stores the text and extracts
    # the reviewer list from any 'R=' lines.
    # TODO(dpranke): Cloned from git_cl.py. These should be shared.
    if not description:
      self._description = description
      return

    parsed_lines = []
    reviewers_re = re.compile(REVIEWERS_REGEX)
    reviewers = ''
    for l in description.splitlines():
      matched_reviewers = reviewers_re.match(l)
      if matched_reviewers:
        reviewers = matched_reviewers.group(1).split(',')
      parsed_lines.append(l)
    self._reviewers = reviewers
    self._description = '\n'.join(parsed_lines)

  description = property(_get_description, _set_description)

  @property
  def reviewers(self):
    # Reviewers parsed from the last 'R=' line of the description, or '' if
    # none was found.
    return self._reviewers

  def NeedsUpload(self):
    """Returns True if the change has local edits not yet sent to Rietveld."""
    return self.needs_upload

  def GetFileNames(self):
    """Returns the list of file names included in this change."""
    return [f[1] for f in self._files]

  def GetFiles(self):
    """Returns the list of files included in this change with their status."""
    return self._files

  def GetLocalRoot(self):
    """Returns the local repository checkout root directory."""
    return self._local_root

  def Exists(self):
    """Returns True if this change already exists (i.e., is not new)."""
    return (self.issue or self.description or self._files)

  def _NonDeletedFileList(self):
    """Returns a list of files in this change, not including deleted files."""
    return [f[1] for f in self.GetFiles()
            if not f[0].startswith("D")]

  def _AddedFileList(self):
    """Returns a list of files added in this change."""
    return [f[1] for f in self.GetFiles() if f[0].startswith("A")]

  def Save(self):
    """Writes the changelist information to disk."""
    data = json.dumps({
          'issue': self.issue,
          'patchset': self.patchset,
          'needs_upload': self.NeedsUpload(),
          'files': self.GetFiles(),
          'description': self.description,
          'rietveld': self.rietveld,
        }, sort_keys=True, indent=2)
    gclient_utils.FileWrite(GetChangelistInfoFile(self.name), data)

  def Delete(self):
    """Removes the changelist information from disk."""
    os.remove(GetChangelistInfoFile(self.name))

  def RpcServer(self):
    """Returns (lazily creating) the Rietveld RPC client for this change.

    Aborts the program if no Rietveld server is configured.
    """
    if not self._rpc_server:
      if not self.rietveld:
        ErrorExit(CODEREVIEW_SETTINGS_FILE_NOT_FOUND)
      self._rpc_server = rietveld.CachingRietveld(self.rietveld, None, None)
    return self._rpc_server

  def CloseIssue(self):
    """Closes the Rietveld issue for this changelist."""
    # Newer versions of Rietveld require us to pass an XSRF token to POST, so
    # we fetch it from the server.
    xsrf_token = self.SendToRietveld(
        '/xsrf_token',
        extra_headers={'X-Requesting-XSRF-Token': '1'})

    # You cannot close an issue with a GET.
    # We pass an empty string for the data so it is a POST rather than a GET.
    data = [("description", self.description),
            ("xsrf_token", xsrf_token)]
    ctype, body = upload.EncodeMultipartFormData(data, [])
    self.SendToRietveld('/%d/close' % self.issue, payload=body,
        content_type=ctype)

  def UpdateRietveldDescription(self):
    """Sets the description for an issue on Rietveld."""
    data = [("description", self.description),]
    ctype, body = upload.EncodeMultipartFormData(data, [])
    self.SendToRietveld('/%d/description' % self.issue, payload=body,
        content_type=ctype)

  def GetIssueDescription(self):
    """Returns the issue description from Rietveld."""
    return self.SendToRietveld('/%d/description' % self.issue)

  def AddComment(self, comment):
    """Adds a comment for an issue on Rietveld.
    As a side effect, this will email everyone associated with the issue."""
    return self.RpcServer().add_comment(self.issue, comment)

  def PrimeLint(self):
    """Do background work on Rietveld to lint the file so that the results are
    ready when the issue is viewed."""
    if self.issue and self.patchset:
      self.SendToRietveld('/lint/issue%s_%s' % (self.issue, self.patchset),
                          timeout=10)

  def SendToRietveld(self, request_path, timeout=None, **kwargs):
    """Send a POST/GET to Rietveld.  Returns the response body.

    With no timeout, a URL error aborts the program; with a timeout the
    error is swallowed and None is returned (best-effort request).
    """
    try:
      return self.RpcServer().Send(request_path, timeout=timeout, **kwargs)
    except urllib2.URLError:
      if timeout is None:
        ErrorExit('Error accessing url %s' % request_path)
      else:
        return None

  def MissingTests(self):
    """Returns True if the change looks like it needs unit tests but has none.

    A change needs unit tests if it contains any new source files or methods.
    """
    SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
    # Ignore third_party entirely.
    files = [f for f in self._NonDeletedFileList()
             if f.find("third_party") == -1]
    added_files = [f for f in self._AddedFileList()
                   if f.find("third_party") == -1]

    # If the change is entirely in third_party, we're done.
    if len(files) == 0:
      return False

    # Any new or modified test files?
    # A test file's name ends with "test.*" or "tests.*".
    test_files = [test for test in files
                  if os.path.splitext(test)[0].rstrip("s").endswith("test")]
    if len(test_files) > 0:
      return False

    # Any new source files?
    source_files = [item for item in added_files
                    if os.path.splitext(item)[1] in SOURCE_SUFFIXES]
    if len(source_files) > 0:
      return True

    # Do the long test, checking the files for new methods.
    return self._HasNewMethod()

  def _HasNewMethod(self):
    """Returns True if the changeset contains any new functions, or if a
    function signature has been changed.

    A function is identified by starting flush left, containing a "(" before
    the next flush-left line, and either ending with "{" before the next
    flush-left line or being followed by an unindented "{".

    Currently this returns True for new methods, new static functions, and
    methods or functions whose signatures have been changed.

    Inline methods added to header files won't be detected by this. That's
    acceptable for purposes of determining if a unit test is needed, since
    inline methods should be trivial.
    """
    # To check for methods added to source or header files, we need the diffs.
    # We'll generate them all, since there aren't likely to be many files
    # apart from source and headers; besides, we'll want them all if we're
    # uploading anyway.
    if self.patch is None:
      self.patch = GenerateDiff(self.GetFileNames())

    definition = ""
    for line in self.patch.splitlines():
      # Only consider lines added by the patch.
      if not line.startswith("+"):
        continue
      line = line.strip("+").rstrip(" \t")
      # Skip empty lines, comments, and preprocessor directives.
      # TODO(pamg): Handle multiline comments if it turns out to be a problem.
      if line == "" or line.startswith("/") or line.startswith("#"):
        continue

      # A possible definition ending with "{" is complete, so check it.
      if definition.endswith("{"):
        if definition.find("(") != -1:
          return True
        definition = ""

      # A { or an indented line, when we're in a definition, continues it.
      if (definition != "" and
          (line == "{" or line.startswith(" ") or line.startswith("\t"))):
        definition += line

      # A flush-left line starts a new possible function definition.
      elif not line.startswith(" ") and not line.startswith("\t"):
        definition = line

    return False

  @staticmethod
  def Load(changename, local_root, fail_on_not_found, update_status):
    """Gets information about a changelist.

    Args:
      fail_on_not_found: if True, this function will quit the program if the
        changelist doesn't exist.
      update_status: if True, the svn status will be updated for all the files
        and unchanged files will be removed.

    Returns: a ChangeInfo object.
    """
    info_file = GetChangelistInfoFile(changename)
    if not os.path.exists(info_file):
      if fail_on_not_found:
        ErrorExit("Changelist " + changename + " not found.")
      # Return an empty, unsaved change for a new changelist name.
      return ChangeInfo(changename, 0, 0, '', None, local_root, None, False)
    content = gclient_utils.FileRead(info_file)
    save = False
    try:
      values = ChangeInfo._LoadNewFormat(content)
    except ValueError:
      # Fall back to the deprecated format and re-save as JSON below.
      try:
        values = ChangeInfo._LoadOldFormat(content)
        save = True
      except ValueError:
        ErrorExit(
            ('Changelist file %s is corrupt.\n'
            'Either run "gcl delete %s" or manually edit the file') % (
                info_file, changename))
    files = values['files']
    if update_status:
      # Iterate over a copy since entries may be removed while looping.
      for item in files[:]:
        status_result = SVN.CaptureStatus(item[1], local_root)
        if not status_result or not status_result[0][0]:
          # File has been reverted.
          save = True
          files.remove(item)
          continue
        status = status_result[0][0]
        if status != item[0]:
          save = True
          files[files.index(item)] = (status, item[1])
    change_info = ChangeInfo(
        changename,
        values['issue'],
        values['patchset'],
        values['description'],
        files,
        local_root,
        values.get('rietveld'),
        values['needs_upload'])
    if save:
      change_info.Save()
    return change_info

  @staticmethod
  def _LoadOldFormat(content):
    # The info files have the following format:
    # issue_id, patchset\n   (, patchset is optional)
    # SEPARATOR\n
    # filepath1\n
    # filepath2\n
    # .
    # .
    # filepathn\n
    # SEPARATOR\n
    # description
    split_data = content.split(ChangeInfo.SEPARATOR, 2)
    if len(split_data) != 3:
      raise ValueError('Bad change format')
    values = {
      'issue': 0,
      'patchset': 0,
      'needs_upload': False,
      'files': [],
    }
    items = split_data[0].split(', ')
    if items[0]:
      values['issue'] = int(items[0])
    if len(items) > 1:
      values['patchset'] = int(items[1])
    if len(items) > 2:
      values['needs_upload'] = (items[2] == "dirty")
    for line in split_data[1].splitlines():
      # First 7 characters are the svn status columns, the rest is the path.
      status = line[:7]
      filename = line[7:]
      values['files'].append((status, filename))
    values['description'] = split_data[2]
    return values

  @staticmethod
  def _LoadNewFormat(content):
    # New format is plain JSON; raises ValueError on corrupt content.
    return json.loads(content)

  def __str__(self):
    # Debug representation: dumps all non-callable public attributes.
    out = ['%s:' % self.__class__.__name__]
    for k in dir(self):
      if k.startswith('__'):
        continue
      v = getattr(self, k)
      if v is self or callable(getattr(self, k)):
        continue
      out.append('  %s: %r' % (k, v))
    return '\n'.join(out)
def GetChangelistInfoFile(changename):
  """Returns the path of the file that stores information about a changelist.

  Aborts via ErrorExit when the name is empty or contains anything other than
  word characters and dashes.
  """
  is_invalid = not changename or re.search(r'[^\w-]', changename)
  if is_invalid:
    ErrorExit("Invalid changelist name: " + changename)
  return os.path.join(GetChangesDir(), changename)
def LoadChangelistInfoForMultiple(changenames, local_root, fail_on_not_found,
                                  update_status):
  """Loads many changes and merges their files list into one pseudo change.

  This is mainly useful to concatenate many changes into one for a 'gcl try'.

  Args:
    changenames: comma-separated string of changelist names.
    local_root: local repository checkout root directory.
    fail_on_not_found: if True, quit the program if a changelist is missing.
    update_status: if True, refresh the svn status of all the files.

  Returns: a ChangeInfo whose files are the concatenation of all the changes'
  files (the aggregate itself is never saved to disk).
  """
  changes = changenames.split(',')
  aggregate_change_info = ChangeInfo(
      changenames, 0, 0, '', None, local_root, None, False)
  for change in changes:
    aggregate_change_info._files += ChangeInfo.Load(
        change, local_root, fail_on_not_found, update_status).GetFiles()
  return aggregate_change_info
def GetCLs():
  """Returns a list of all the changelists in this repository."""
  # codereview.settings can live in the changes directory; it is not a CL.
  return [name for name in os.listdir(GetChangesDir())
          if name != CODEREVIEW_SETTINGS_FILE]
def GenerateChangeName():
  """Generate a random changelist name not already in use."""
  random.seed()
  existing_names = GetCLs()
  while True:
    # Four characters alternating letter/digit, e.g. 'a1b2'.
    candidate = ''.join([
        random.choice(string.ascii_lowercase),
        random.choice(string.digits),
        random.choice(string.ascii_lowercase),
        random.choice(string.digits)])
    if candidate not in existing_names:
      return candidate
def GetModifiedFiles():
  """Returns a set that maps from changelist name to (status,filename) tuples.

  Files not in a changelist have an empty changelist name.  Filenames are in
  relation to the top level directory of the current repository.  Note that
  only the current directory and subdirectories are scanned, in order to
  improve performance while still being flexible.
  """
  files = {}

  # Since the files are normalized to the root folder of the repository, figure
  # out what we need to add to the paths.
  dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)

  # Get a list of all files in changelists.
  files_in_cl = {}
  for cl in GetCLs():
    change_info = ChangeInfo.Load(cl, GetRepositoryRoot(),
                                  fail_on_not_found=True, update_status=False)
    for status, filename in change_info.GetFiles():
      files_in_cl[filename] = change_info.name

  # Get all the modified files down the current directory.
  for line in SVN.CaptureStatus(None, os.getcwd()):
    status = line[0]
    filename = line[1]
    # Skip files svn doesn't know about.
    if status[0] == "?":
      continue
    if dir_prefix:
      filename = os.path.join(dir_prefix, filename)
    change_list_name = ""
    if filename in files_in_cl:
      change_list_name = files_in_cl[filename]
    files.setdefault(change_list_name, []).append((status, filename))

  return files
def GetFilesNotInCL():
  """Returns a list of tuples (status,filename) that aren't in any changelists.

  See docstring of GetModifiedFiles for information about path of files and
  which directories are scanned.
  """
  # Files outside any changelist live under the empty-string key; a single
  # .get() replaces the membership test plus lookup.
  return GetModifiedFiles().get("", [])
def ListFiles(show_unknown_files):
  """Prints the modified files in this checkout, grouped by changelist.

  Args:
    show_unknown_files: when True, also list files svn does not know about.

  Returns 0 (command exit code).
  """
  files = GetModifiedFiles()
  cl_keys = files.keys()
  cl_keys.sort()
  for cl_name in cl_keys:
    # The empty key holds files not in any changelist; printed separately.
    if not cl_name:
      continue
    note = ""
    change_info = ChangeInfo.Load(cl_name, GetRepositoryRoot(),
                                  fail_on_not_found=True, update_status=False)
    if len(change_info.GetFiles()) != len(files[cl_name]):
      note = " (Note: this changelist contains files outside this directory)"
    print "\n--- Changelist " + cl_name + note + ":"
    for filename in files[cl_name]:
      print "".join(filename)
  # unknown_files is only defined when show_unknown_files is True; the guard
  # below only reads it in that case.
  if show_unknown_files:
    unknown_files = UnknownFiles()
  if (files.get('') or (show_unknown_files and len(unknown_files))):
    print "\n--- Not in any changelist:"
    for item in files.get('', []):
      print "".join(item)
    if show_unknown_files:
      for filename in unknown_files:
        print "? %s" % filename
  return 0
def GenerateDiff(files):
  """Returns an svn diff of |files| relative to the repository root."""
  repo_root = GetRepositoryRoot()
  return SVN.GenerateDiff(files, repo_root, full_move=False, revision=None)
def OptionallyDoPresubmitChecks(change_info, committing, args):
  """Runs presubmit checks unless --no_presubmit or --force was passed.

  Note: FilterFlag removes the flag it matches from args (a side effect), and
  'or' short-circuits, so --force is left in args when --no_presubmit is also
  present. CMDcommit relies on checking args *before* calling this.

  Returns a presubmit_support.PresubmitOutput.
  """
  if FilterFlag(args, "--no_presubmit") or FilterFlag(args, "--force"):
    # Record the hook bypass on the server.
    breakpad.SendStack(
        breakpad.DEFAULT_URL + '/breakpad',
        'GclHooksBypassedCommit',
        'Issue %s/%s bypassed hook when committing' %
        (change_info.rietveld, change_info.issue),
        verbose=False)
    return presubmit_support.PresubmitOutput()
  return DoPresubmitChecks(change_info, committing, True)
def defer_attributes(a, b):
"""Copy attributes from an object (like a function) to another."""
for x in dir(a):
if not getattr(b, x, None):
setattr(b, x, getattr(a, x))
def need_change(function):
  """Converts args -> change_info."""
  # pylint: disable=W0612,W0621
  def hook(args):
    if len(args) != 1:
      ErrorExit("You need to pass a change list name")
    change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(), True, True)
    return function(change_info)

  defer_attributes(function, hook)
  hook.need_change = True
  hook.no_args = True
  return hook
def need_change_and_args(function):
  """Converts args -> change_info."""
  # pylint: disable=W0612,W0621
  def hook(args):
    if not args:
      ErrorExit("You need to pass a change list name")
    changename = args.pop(0)
    change_info = ChangeInfo.Load(changename, GetRepositoryRoot(), True, True)
    return function(change_info, args)

  defer_attributes(function, hook)
  hook.need_change = True
  return hook
def no_args(function):
  """Make sure no args are passed."""
  # pylint: disable=W0612,W0621
  def hook(args):
    if len(args) > 0:
      ErrorExit("Doesn't support arguments")
    return function()

  defer_attributes(function, hook)
  hook.no_args = True
  return hook
def attrs(**kwargs):
  """Decorate a function with new attributes.

  Returns a decorator that sets each keyword argument as an attribute on the
  decorated function and returns the function unchanged.
  """
  def decorate(function):
    # Iterate key/value pairs directly instead of re-looking each key up.
    for name, value in kwargs.items():
      setattr(function, name, value)
    return function
  return decorate
@no_args
def CMDopened():
  """Lists modified files in the current directory down."""
  # False: do not list files svn doesn't know about.
  return ListFiles(False)
@no_args
def CMDstatus():
  """Lists modified and unknown files in the current directory down."""
  # True: also list files svn doesn't know about ('?' status).
  return ListFiles(True)
@need_change_and_args
@attrs(usage='[--no_presubmit] [--no_watchlists]')
def CMDupload(change_info, args):
  """Uploads the changelist to the server for review.

  This does not submit a try job; use gcl try to submit a try job.
  Returns 0 on success, 1 if the presubmit checks refused to continue.
  """
  if '-s' in args or '--server' in args:
    ErrorExit('Don\'t use the -s flag, fix codereview.settings instead')

  if not change_info.GetFiles():
    print "Nothing to upload, changelist is empty."
    return 0

  output = OptionallyDoPresubmitChecks(change_info, False, args)
  if not output.should_continue():
    return 1
  no_watchlists = (FilterFlag(args, "--no_watchlists") or
                   FilterFlag(args, "--no-watchlists"))

  # Map --send-mail to --send_mail
  if FilterFlag(args, "--send-mail"):
    args.append("--send_mail")

  # Replace -m with -t and --message with --title, but make sure to
  # preserve anything after the -m/--message.
  # found_deprecated_arg is a one-element list so the replace_message closure
  # can mutate it (Python 2 has no 'nonlocal').
  found_deprecated_arg = [False]
  def replace_message(a):
    if a.startswith('-m'):
      found_deprecated_arg[0] = True
      return '-t' + a[2:]
    elif a.startswith('--message'):
      found_deprecated_arg[0] = True
      return '--title' + a[9:]
    return a
  args = map(replace_message, args)
  if found_deprecated_arg[0]:
    print >> sys.stderr, (
        '\nWARNING: Use -t or --title to set the title of the patchset.\n'
        'In the near future, -m or --message will send a message instead.\n'
        'See http://goo.gl/JGg0Z for details.\n')

  upload_arg = ["upload.py", "-y"]
  upload_arg.append("--server=%s" % change_info.rietveld)

  reviewers = change_info.reviewers or output.reviewers
  if (reviewers and
      not any(arg.startswith('-r') or arg.startswith('--reviewer') for
              arg in args)):
    upload_arg.append('--reviewers=%s' % ','.join(reviewers))

  upload_arg.extend(args)

  desc_file = None
  try:
    if change_info.issue:
      # Uploading a new patchset.
      upload_arg.append("--issue=%d" % change_info.issue)

      if not any(i.startswith('--title') or i.startswith('-t') for i in args):
        upload_arg.append('--title= ')
    else:
      # First time we upload.
      handle, desc_file = tempfile.mkstemp(text=True)
      os.write(handle, change_info.description)
      os.close(handle)

      # Watchlist processing -- CC people interested in this changeset
      # http://dev.chromium.org/developers/contributing-code/watchlists
      if not no_watchlists:
        import watchlists
        watchlist = watchlists.Watchlists(change_info.GetLocalRoot())
        watchers = watchlist.GetWatchersForPaths(change_info.GetFileNames())

      cc_list = GetCodeReviewSetting("CC_LIST")
      if not no_watchlists and watchers:
        # Filter out all empty elements and join by ','
        cc_list = ','.join(filter(None, [cc_list] + watchers))
      if cc_list:
        upload_arg.append("--cc=" + cc_list)
      upload_arg.append("--file=%s" % desc_file)

      if GetCodeReviewSetting("PRIVATE") == "True":
        upload_arg.append("--private")

    # If we have a lot of files with long paths, then we won't be able to fit
    # the command to "svn diff".  Instead, we generate the diff manually for
    # each file and concatenate them before passing it to upload.py.
    if change_info.patch is None:
      change_info.patch = GenerateDiff(change_info.GetFileNames())

    # Change the current working directory before calling upload.py so that it
    # shows the correct base.
    previous_cwd = os.getcwd()
    os.chdir(change_info.GetLocalRoot())
    try:
      try:
        issue, patchset = upload.RealMain(upload_arg, change_info.patch)
      except KeyboardInterrupt:
        sys.exit(1)
      if issue and patchset:
        change_info.issue = int(issue)
        change_info.patchset = int(patchset)
        change_info.Save()
      change_info.PrimeLint()
    finally:
      os.chdir(previous_cwd)
  finally:
    if desc_file:
      os.remove(desc_file)
  print "*** Upload does not submit a try; use gcl try to submit a try. ***"
  return 0
@need_change_and_args
@attrs(usage='[--upload]')
def CMDpresubmit(change_info, args):
  """Runs presubmit checks on the change.

  The actual presubmit code is implemented in presubmit_support.py and looks
  for PRESUBMIT.py files."""
  if not change_info.GetFiles():
    print('Nothing to presubmit check, changelist is empty.')
    return 0
  parser = optparse.OptionParser()
  parser.add_option('--upload', action='store_true')
  options, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % args)

  # Without --upload, simulate the checks that would run at commit time.
  committing = not options.upload
  if committing:
    print('*** Presubmit checks for COMMIT would report: ***')
  else:
    print('*** Presubmit checks for UPLOAD would report: ***')
  return not DoPresubmitChecks(change_info, committing, False)
def TryChange(change_info, args, swallow_exception):
  """Create a diff file of change_info and send it to the try server.

  Args:
    change_info: the ChangeInfo to try, or None to forward args untouched.
    args: extra arguments passed through to trychange.
    swallow_exception: when True, a missing trychange module returns 1
        instead of aborting the program.
  """
  try:
    import trychange
  except ImportError:
    if swallow_exception:
      return 1
    ErrorExit("You need to install trychange.py to use the try server.")

  trychange_args = []
  if change_info:
    trychange_args.extend(['--name', change_info.name])
    if change_info.issue:
      trychange_args.extend(["--issue", str(change_info.issue)])
    if change_info.patchset:
      trychange_args.extend(["--patchset", str(change_info.patchset)])
    change = presubmit_support.SvnChange(change_info.name,
                                         change_info.description,
                                         change_info.GetLocalRoot(),
                                         change_info.GetFiles(),
                                         change_info.issue,
                                         change_info.patchset,
                                         None)
  else:
    change = None
  trychange_args.extend(args)
  return trychange.TryChange(
      trychange_args,
      change=change,
      swallow_exception=swallow_exception,
      prog='gcl try',
      extra_epilog='\n'
                   'When called from gcl, use the format gcl try <change_name>.\n')
@need_change_and_args
@attrs(usage='[--no_presubmit]')
def CMDcommit(change_info, args):
  """Commits the changelist to the repository.

  Returns 0 on success, 1 when there is nothing to commit or the presubmit
  checks refused to continue.
  """
  if not change_info.GetFiles():
    print "Nothing to commit, changelist is empty."
    return 1

  # OptionallyDoPresubmitChecks has a side-effect which eats these flags.
  bypassed = '--no_presubmit' in args or '--force' in args
  output = OptionallyDoPresubmitChecks(change_info, True, args)
  if not output.should_continue():
    return 1

  # We face a problem with svn here: Let's say change 'bleh' modifies
  # svn:ignore on dir1\. but another unrelated change 'pouet' modifies
  # dir1\foo.cc. When the user `gcl commit bleh`, foo.cc is *also committed*.
  # The only fix is to use --non-recursive but that has its issues too:
  # Let's say if dir1 is deleted, --non-recursive must *not* be used otherwise
  # you'll get "svn: Cannot non-recursively commit a directory deletion of a
  # directory with child nodes". Yay...
  commit_cmd = ["svn", "commit"]
  if change_info.issue:
    # Get the latest description from Rietveld.
    change_info.description = change_info.GetIssueDescription()

  commit_message = change_info.description.replace('\r\n', '\n')
  if change_info.issue:
    server = change_info.rietveld
    if not server.startswith("http://") and not server.startswith("https://"):
      server = "http://" + server
    commit_message += ('\nReview URL: %s/%d' % (server, change_info.issue))

  # The commit message and the target file list are passed to svn via
  # temporary files to dodge command-line length limits.
  handle, commit_filename = tempfile.mkstemp(text=True)
  os.write(handle, commit_message)
  os.close(handle)
  try:
    handle, targets_filename = tempfile.mkstemp(text=True)
    os.write(handle, "\n".join(change_info.GetFileNames()))
    os.close(handle)
    try:
      commit_cmd += ['--file=' + commit_filename]
      commit_cmd += ['--targets=' + targets_filename]
      # Change the current working directory before calling commit.
      output = ''
      try:
        output = RunShell(commit_cmd, True)
      except subprocess2.CalledProcessError, e:
        ErrorExit('Commit failed.\n%s' % e)
    finally:
      os.remove(commit_filename)
  finally:
    os.remove(targets_filename)
  if output.find("Committed revision") != -1:
    change_info.Delete()

    if change_info.issue:
      # Extract the committed revision number from the svn output.
      revision = re.compile(".*?\nCommitted revision (\d+)",
                            re.DOTALL).match(output).group(1)
      viewvc_url = GetCodeReviewSetting('VIEW_VC')
      change_info.description += '\n'
      if viewvc_url and revision:
        change_info.description += "\nCommitted: " + viewvc_url + revision
      elif revision:
        change_info.description += "\nCommitted: " + revision
      change_info.CloseIssue()
      props = change_info.RpcServer().get_issue_properties(
          change_info.issue, False)
      patch_num = len(props['patchsets'])
      comment = "Committed patchset #%d manually as r%s" % (patch_num, revision)
      comment += ' (presubmit successful).' if not bypassed else '.'
      change_info.AddComment(comment)
  return 0
def CMDchange(args):
"""Creates or edits a changelist.
Only scans the current directory and subdirectories.
"""
# Verify the user is running the change command from a read-write checkout.
svn_info = SVN.CaptureLocalInfo([], '.')
if not svn_info:
ErrorExit("Current checkout is unversioned. Please retry with a versioned "
"directory.")
if len(args) == 0:
# Generate a random changelist name.
changename = GenerateChangeName()
elif args[0] == '--force':
changename = GenerateChangeName()
else:
changename = args[0]
change_info = ChangeInfo.Load(changename, GetRepositoryRoot(), False, True)
if len(args) == 2:
if not os.path.isfile(args[1]):
ErrorExit('The change "%s" doesn\'t exist.' % args[1])
f = open(args[1], 'rU')
override_description = f.read()
f.close()
else:
override_description = None
if change_info.issue and not change_info.NeedsUpload():
try:
description = change_info.GetIssueDescription()
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = 0
change_info.Save()
else:
ErrorExit("Error getting the description from Rietveld: " + err)
else:
if override_description:
description = override_description
else:
description = change_info.description
other_files = GetFilesNotInCL()
# Edited files (as opposed to files with only changed properties) will have
# a letter for the first character in the status string.
file_re = re.compile(r"^[a-z].+\Z", re.IGNORECASE)
affected_files = [x for x in other_files if file_re.match(x[0])]
unaffected_files = [x for x in other_files if not file_re.match(x[0])]
description = description.rstrip() + '\n'
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + change_info.GetLocalRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.GetFiles()]))
if change_info.Exists():
text += (separator2 +
'\n'.join([f[0] + f[1] for f in affected_files]) + '\n')
else:
text += ('\n'.join([f[0] + f[1] for f in affected_files]) + '\n' +
separator2)
text += '\n'.join([f[0] + f[1] for f in unaffected_files]) + '\n'
result = gclient_utils.RunEditor(text, False)
if not result:
ErrorExit('Running editor failed')
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n%r" % result)
# Update the CL description if it has changed.
new_description = split_result[0]
cl_files_text = split_result[1]
if new_description != description or override_description:
change_info.description = new_description
change_info.needs_upload = True
new_cl_files = []
for line in cl_files_text.splitlines():
if not len(line):
continue
if line.startswith("---"):
break
status = line[:7]
filename = line[7:]
new_cl_files.append((status, filename))
if (not len(change_info.GetFiles()) and not change_info.issue and
not len(new_description) and not new_cl_files):
ErrorExit("Empty changelist not saved")
change_info._files = new_cl_files
change_info.Save()
if svn_info.get('URL', '').startswith('http:'):
Warn("WARNING: Creating CL in a read-only checkout. You will need to "
"commit using a commit queue!")
print change_info.name + " changelist saved."
if change_info.MissingTests():
Warn("WARNING: " + MISSING_TEST_MSG)
# Update the Rietveld issue.
if change_info.issue and change_info.NeedsUpload():
change_info.UpdateRietveldDescription()
change_info.needs_upload = False
change_info.Save()
return 0
@need_change_and_args
def CMDlint(change_info, args):
  """Runs cpplint.py on all the files in the change list.

  Checks all the files in the changelist for possible style violations.
  """
  # Access to a protected member _XX of a client class
  # pylint: disable=W0212
  try:
    import cpplint
    import cpplint_chromium
  except ImportError:
    ErrorExit("You need to install cpplint.py to lint C++ files.")
  # Change the current working directory before calling lint so that it
  # shows the correct base.
  previous_cwd = os.getcwd()
  os.chdir(change_info.GetLocalRoot())
  try:
    # Process cpplints arguments if any.
    filenames = cpplint.ParseArguments(args + change_info.GetFileNames())
    # A file is linted only when it matches the LINT_REGEX white list and
    # does not match the LINT_IGNORE_REGEX black list; both regexes come
    # from the code review settings, with module defaults as fallback.
    white_list = GetCodeReviewSetting("LINT_REGEX")
    if not white_list:
      white_list = DEFAULT_LINT_REGEX
    white_regex = re.compile(white_list)
    black_list = GetCodeReviewSetting("LINT_IGNORE_REGEX")
    if not black_list:
      black_list = DEFAULT_LINT_IGNORE_REGEX
    black_regex = re.compile(black_list)
    extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
    for filename in filenames:
      if white_regex.match(filename):
        if black_regex.match(filename):
          print "Ignoring file %s" % filename
        else:
          cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
                              extra_check_functions)
      else:
        print "Skipping file %s" % filename
  finally:
    # Always restore the caller's working directory, even if lint raised.
    os.chdir(previous_cwd)
  print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
  # NOTE(review): this returns 1 unconditionally, regardless of the error
  # count — presumably deliberate (lint is advisory); confirm before relying
  # on the exit code.
  return 1
def DoPresubmitChecks(change_info, committing, may_prompt):
"""Imports presubmit, then calls presubmit.DoPresubmitChecks."""
root_presubmit = GetCachedFile('PRESUBMIT.py', use_root=True)
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
output = presubmit_support.DoPresubmitChecks(
change=change,
committing=committing,
verbose=False,
output_stream=sys.stdout,
input_stream=sys.stdin,
default_presubmit=root_presubmit,
may_prompt=may_prompt,
rietveld_obj=change_info.RpcServer())
if not output.should_continue() and may_prompt:
# TODO(dpranke): move into DoPresubmitChecks(), unify cmd line args.
print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
return output
@no_args
def CMDchanges():
"""Lists all the changelists and their files."""
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
print "\n--- Changelist " + change_info.name + ":"
for filename in change_info.GetFiles():
print "".join(filename)
return 0
@no_args
def CMDdeleteempties():
"""Delete all changelists that have no files."""
print "\n--- Deleting:"
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
if not len(change_info.GetFiles()):
print change_info.name
change_info.Delete()
return 0
@no_args
def CMDnothave():
"""Lists files unknown to Subversion."""
for filename in UnknownFiles():
print "? " + "".join(filename)
return 0
@attrs(usage='<svn options>')
def CMDdiff(args):
  """Diffs all files in the changelist or all files that aren't in a CL."""
  if args:
    # The first argument names a changelist; the rest pass through to svn.
    info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
    filenames = info.GetFileNames()
  else:
    # No changelist given: diff everything not assigned to a CL.
    filenames = [f[1] for f in GetFilesNotInCL()]
  root = GetRepositoryRoot()
  command = ['svn', 'diff'] + [os.path.join(root, f) for f in filenames] + args
  return RunShellWithReturnCode(command, print_output=True)[1]
@no_args
def CMDsettings():
"""Prints code review settings for this checkout."""
# Force load settings
GetCodeReviewSetting("UNKNOWN")
del CODEREVIEW_SETTINGS['__just_initialized']
print '\n'.join(("%s: %s" % (str(k), str(v))
for (k,v) in CODEREVIEW_SETTINGS.iteritems()))
return 0
@need_change
def CMDdescription(change_info):
  """Prints the description of the specified change to stdout."""
  # Equivalent to the print statement: description plus a trailing newline.
  sys.stdout.write(change_info.description + '\n')
  return 0
def CMDdelete(args):
  """Deletes a changelist.

  Expects exactly one argument: the name of the changelist to delete.
  Exits via ErrorExit() on bad usage or an unknown changelist name;
  returns 0 on success.
  """
  # Idiomatic inequality test instead of `not len(args) == 1`.
  if len(args) != 1:
    ErrorExit('You need to pass a change list name')
  filepath = GetChangelistInfoFile(args[0])
  if not os.path.isfile(filepath):
    ErrorExit('You need to pass a valid change list name')
  os.remove(filepath)
  return 0
def CMDtry(args):
  """Sends the change to the tryserver to do a test run on your code.

  To send multiple changes as one path, use a comma-separated list of
  changenames. Use 'gcl help try' for more information!"""
  # When the change contains no file, send the "changename" positional
  # argument to trychange.py.
  # When the command is 'try' and --patchset is used, the patch to try
  # is on the Rietveld server.
  if not args:
    ErrorExit("You need to pass a change list name")
  # Idiomatic membership test instead of `args[0].find(',') != -1`.
  if ',' in args[0]:
    # Comma-separated names: aggregate several changelists into one job.
    change_info = LoadChangelistInfoForMultiple(args[0], GetRepositoryRoot(),
                                                True, True)
  else:
    change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(),
                                  True, True)
  if change_info.GetFiles():
    # The changelist has files: consume the name, pass the rest through.
    args = args[1:]
  else:
    # Empty changelist: let trychange.py interpret args[0] itself.
    change_info = None
  return TryChange(change_info, args, swallow_exception=False)
@attrs(usage='<old-name> <new-name>')
def CMDrename(args):
"""Renames an existing change."""
if len(args) != 2:
ErrorExit("Usage: gcl rename <old-name> <new-name>.")
src, dst = args
src_file = GetChangelistInfoFile(src)
if not os.path.isfile(src_file):
ErrorExit("Change '%s' does not exist." % src)
dst_file = GetChangelistInfoFile(dst)
if os.path.isfile(dst_file):
ErrorExit("Change '%s' already exists; pick a new name." % dst)
os.rename(src_file, dst_file)
print "Change '%s' renamed '%s'." % (src, dst)
return 0
def CMDpassthru(args):
  """Everything else that is passed into gcl we redirect to svn.

  It assumes a change list name is passed and is converted with the files names.

  args[0] is the svn subcommand, args[1] the changelist name; the
  changelist's files (rooted at the repository root) are appended to the
  svn command line.
  """
  # `len(args) < 2` already rejects an empty list, so the former
  # `not args or` guard was redundant and has been dropped.
  if len(args) < 2:
    ErrorExit("You need to pass a change list name for this svn fall-through "
              "command")
  cl_name = args[1]
  command = ["svn", args[0]]
  # The former `if len(command) > 1` guard was always true here (the list
  # above always has two elements), so the body now runs unconditionally.
  root = GetRepositoryRoot()
  change_info = ChangeInfo.Load(cl_name, root, True, True)
  command.extend([os.path.join(root, x) for x in change_info.GetFileNames()])
  return RunShellWithReturnCode(command, print_output=True)[1]
def Command(name):
  """Returns the CMD<name> handler from this module, or None if absent."""
  this_module = sys.modules[__name__]
  return getattr(this_module, 'CMD%s' % name, None)
def GenUsage(command):
  """Modify an OptParse object with the function's documentation.

  Builds 'Usage: gcl ...' text from the handler's attributes (usage,
  need_change, no_args) followed by its docstring.
  """
  handler = Command(command)
  usage_tail = getattr(handler, 'usage', '')
  # 'help' itself is displayed generically as <command>.
  display = '<command>' if command == 'help' else command
  change_arg = ' <change_list>' if getattr(handler, 'need_change', None) else ''
  options = '' if getattr(handler, 'no_args', None) else ' [options]'
  res = 'Usage: gcl %s%s%s %s\n\n' % (display, change_arg, options, usage_tail)
  # Strip the docstring's leading indentation from continuation lines.
  res += re.sub('\n  ', '\n', handler.__doc__)
  return res
def CMDhelp(args):
  """Prints this help or help for the given command."""
  if args and 'CMD' + args[0] in dir(sys.modules[__name__]):
    # A known command was named: print its specific usage.
    print GenUsage(args[0])

    # These commands defer to external tools so give this info too.
    if args[0] == 'try':
      TryChange(None, ['--help'], swallow_exception=False)
    if args[0] == 'upload':
      upload.RealMain(['upload.py', '--help'])
    return 0

  # No (or unknown) command: print the generic help plus a one-line
  # summary (first docstring line) for every CMD* function in this module.
  print GenUsage('help')
  print sys.modules[__name__].__doc__
  print 'version ' + __version__ + '\n'

  print('Commands are:\n' + '\n'.join([
      '  %-12s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
      for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
  return 0
def main(argv):
  """Dispatches argv[0] to the matching CMD* handler; returns an exit code."""
  if sys.hexversion < 0x02060000:
    # Requires Python >= 2.6.
    print >> sys.stderr, (
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0])
    return 2

  if not argv:
    argv = ['help']
  command = Command(argv[0])

  # Help can be run from anywhere.
  if command == CMDhelp:
    return command(argv[1:])

  # Every other command needs a repository; bail out early otherwise.
  try:
    GetRepositoryRoot()
  except (gclient_utils.Error, subprocess2.CalledProcessError):
    print >> sys.stderr, 'To use gcl, you need to be in a subversion checkout.'
    return 1

  # Create the directories where we store information about changelists if it
  # doesn't exist.
  try:
    if not os.path.exists(GetInfoDir()):
      os.mkdir(GetInfoDir())
    if not os.path.exists(GetChangesDir()):
      os.mkdir(GetChangesDir())
    if not os.path.exists(GetCacheDir()):
      os.mkdir(GetCacheDir())

    if command:
      return command(argv[1:])
    # Unknown command, try to pass that to svn
    return CMDpassthru(argv)
  except (gclient_utils.Error, subprocess2.CalledProcessError), e:
    print >> sys.stderr, 'Got an exception'
    print >> sys.stderr, str(e)
    return 1
  except upload.ClientLoginError, e:
    print >> sys.stderr, 'Got an exception logging in to Rietveld'
    print >> sys.stderr, str(e)
    return 1
  except urllib2.HTTPError, e:
    # Only HTTP 500 is treated as a transient server problem; anything else
    # propagates to the caller.
    if e.code != 500:
      raise
    print >> sys.stderr, (
        'AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
        'and retry or visit go/isgaeup.\n%s') % (e.code, str(e))
    return 1
if __name__ == "__main__":
  # fix_encoding presumably normalizes console/stream encodings before any
  # output — see the fix_encoding module for the exact behavior.
  fix_encoding.fix_encoding()
  sys.exit(main(sys.argv[1:]))
|
nevir/plexability
|
extern/depot_tools/gcl.py
|
Python
|
gpl-2.0
| 49,766
|
[
"VisIt"
] |
28696d299d94fe6448a2e98435607efbf668b2c623092017a3e6e84abec511ec
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Code is originally adapted from MILK: Machine Learning Toolkit
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# License: MIT. See COPYING.MIT file in the milk distribution
# Authors: Brian Holt, Peter Prettenhofer, Satrajit Ghosh, Gilles Louppe,
# Noel Dawe
# License: BSD3
from __future__ import division
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.selector_mixin import SelectorMixin
from ..utils import array2d, check_random_state
from ..utils.validation import check_arrays
from . import _tree
# Public API of this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]

# Dtype aliases re-exported from the Cython _tree extension (input / output
# array dtypes expected by the tree builder).
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Map a criterion name to its Cython criterion class.
CLASSIFICATION = {
    "gini": _tree.Gini,
    "entropy": _tree.Entropy,
}

REGRESSION = {
    "mse": _tree.MSE,
}
def export_graphviz(decision_tree, out_file=None, feature_names=None):
    """Export a decision tree in DOT format.

    This function generates a GraphViz representation of the decision tree,
    which is then written into `out_file`. Once exported, graphical renderings
    can be generated using, for example::

        $ dot -Tps tree.dot -o tree.ps      (PostScript format)
        $ dot -Tpng tree.dot -o tree.png    (PNG format)

    Parameters
    ----------
    decision_tree : decision tree classifier
        The decision tree to be exported to graphviz.

    out : file object or string, optional (default=None)
        Handle or name of the output file.

    feature_names : list of strings, optional (default=None)
        Names of each of the features.

    Returns
    -------
    out_file : file object
        The file object to which the tree was exported.  The user is
        expected to `close()` this object when done with it.

    Examples
    --------
    >>> import os
    >>> from sklearn.datasets import load_iris
    >>> from sklearn import tree

    >>> clf = tree.DecisionTreeClassifier()
    >>> iris = load_iris()

    >>> clf = clf.fit(iris.data, iris.target)
    >>> import tempfile
    >>> export_file = tree.export_graphviz(clf,
    ...     out_file='test_export_graphvix.dot')
    >>> export_file.close()
    >>> os.unlink(export_file.name)
    """
    def node_to_str(tree, node_id):
        # Builds the DOT label text for a single node.
        value = tree.value[node_id]
        if tree.n_outputs == 1:
            value = value[0, :]

        if tree.children_left[node_id] == _tree.TREE_LEAF:
            # Leaf node: no split to describe, only error/samples/value.
            return "error = %.4f\\nsamples = %s\\nvalue = %s" \
                   % (tree.init_error[node_id],
                      tree.n_samples[node_id],
                      value)
        else:
            if feature_names is not None:
                feature = feature_names[tree.feature[node_id]]
            else:
                feature = "X[%s]" % tree.feature[node_id]

            return "%s <= %.4f\\nerror = %s\\nsamples = %s\\nvalue = %s" \
                   % (feature,
                      tree.threshold[node_id],
                      tree.init_error[node_id],
                      tree.n_samples[node_id],
                      value)

    def recurse(tree, node_id, parent=None):
        # Writes the subtree rooted at node_id to out_file, pre-order.
        if node_id == _tree.TREE_LEAF:
            raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)

        left_child = tree.children_left[node_id]
        right_child = tree.children_right[node_id]

        # Add node with description
        out_file.write('%d [label="%s", shape="box"] ;\n' %
                       (node_id, node_to_str(tree, node_id)))

        if parent is not None:
            # Add edge to parent
            out_file.write('%d -> %d ;\n' % (parent, node_id))

        if left_child != _tree.TREE_LEAF:  # and right_child != _tree.TREE_LEAF
            recurse(tree, left_child, node_id)
            recurse(tree, right_child, node_id)

    if out_file is None:
        out_file = "tree.dot"

    if isinstance(out_file, six.string_types):
        # A filename was given: open it ourselves (caller must close).
        if six.PY3:
            out_file = open(out_file, "w", encoding="utf-8")
        else:
            out_file = open(out_file, "wb")

    out_file.write("digraph Tree {\n")
    if isinstance(decision_tree, _tree.Tree):
        recurse(decision_tree, 0)
    else:
        # An estimator was given; export its underlying Tree.
        recurse(decision_tree.tree_, 0)
    out_file.write("}")

    return out_file
class BaseDecisionTree(BaseEstimator, SelectorMixin):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self,
                 criterion,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_density,
                 max_features,
                 compute_importances,
                 random_state):
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_density = min_density
        self.max_features = max_features

        if compute_importances:
            warn("Setting compute_importances=True is no longer "
                 "required. Variable importances are now computed on the fly "
                 "when accessing the feature_importances_ attribute. This "
                 "parameter will be removed in 0.15.", DeprecationWarning)

        self.compute_importances = compute_importances
        self.random_state = random_state

        # Fitted attributes; populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None

        # Split strategy constant for the Cython builder; subclasses
        # (extra-trees) override this with TREE_SPLIT_RANDOM.
        self.find_split_ = _tree.TREE_SPLIT_BEST

        self.tree_ = None

    def fit(self, X, y,
            sample_mask=None, X_argsorted=None,
            check_input=True, sample_weight=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples. Use ``dtype=np.float32``
            and ``order='F'`` for maximum efficiency.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
            Use ``dtype=np.float64`` and ``order='C'`` for maximum
            efficiency.

        sample_mask : array-like, shape = [n_samples], dtype = bool or None
            A bit mask that encodes the rows of ``X`` that should be
            used to build the decision tree. It can be used for bagging
            without the need to create of copy of ``X``.
            If None a mask will be created that includes all samples.

        X_argsorted : array-like, shape = [n_samples, n_features] or None
            Each column of ``X_argsorted`` holds the row indices of ``X``
            sorted according to the value of the corresponding feature
            in ascending order.
            I.e. ``X[X_argsorted[i, k], k] <= X[X_argsorted[j, k], k]``
            for each j > i.
            If None, ``X_argsorted`` is computed internally.
            The argument is supported to enable multiple decision trees
            to share the data structure and to avoid re-computation in
            tree ensembles. For maximum efficiency use dtype np.int32.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        if check_input:
            X, y = check_arrays(X, y)
        random_state = check_random_state(self.random_state)

        # Convert data to the float32 Fortran-ordered layout the Cython
        # builder expects.
        if (getattr(X, "dtype", None) != DTYPE or
                X.ndim != 2 or
                not X.flags.fortran):
            X = array2d(X, dtype=DTYPE, order="F")

        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            # Re-encode class labels per output as indices 0..n_classes-1.
            for k in xrange(self.n_outputs_):
                unique = np.unique(y[:, k])
                self.classes_.append(unique)
                self.n_classes_.append(unique.shape[0])
                y[:, k] = np.searchsorted(unique, y[:, k])

        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        if is_classification:
            criterion = CLASSIFICATION[self.criterion](self.n_outputs_,
                                                       self.n_classes_)
        else:
            criterion = REGRESSION[self.criterion](self.n_outputs_)

        # Check parameters
        max_depth = np.inf if self.max_depth is None else self.max_depth

        # Resolve string max_features specifications to an integer count.
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        else:
            max_features = self.max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if self.min_density < 0.0 or self.min_density > 1.0:
            raise ValueError("min_density must be in [0, 1]")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        if sample_mask is not None:
            sample_mask = np.asarray(sample_mask, dtype=np.bool)

            if sample_mask.shape[0] != n_samples:
                raise ValueError("Length of sample_mask=%d does not match "
                                 "number of samples=%d"
                                 % (sample_mask.shape[0], n_samples))

        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        if X_argsorted is not None:
            X_argsorted = np.asarray(X_argsorted, dtype=np.int32,
                                     order='F')
            if X_argsorted.shape != X.shape:
                raise ValueError("Shape of X_argsorted does not match "
                                 "the shape of X")

        # Set min_samples_split sensibly
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)

        # Build tree
        self.tree_ = _tree.Tree(self.n_features_, self.n_classes_,
                                self.n_outputs_, criterion, max_depth,
                                min_samples_split, self.min_samples_leaf,
                                self.min_density, max_features,
                                self.find_split_, random_state)

        self.tree_.build(X, y,
                         sample_weight=sample_weight,
                         sample_mask=sample_mask,
                         X_argsorted=X_argsorted)

        # For single-output problems, expose scalars/arrays rather than
        # one-element lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def predict(self, X):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE, order="F")

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        # proba has shape [n_samples, n_outputs, max_n_classes].
        proba = self.tree_.predict(X)

        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # Map argmax class indices back to the original labels.
                return self.classes_.take(np.argmax(proba[:, 0], axis=1),
                                          axis=0)

            else:
                predictions = np.zeros((n_samples, self.n_outputs_))

                for k in xrange(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0, 0]

            else:
                return proba[:, :, 0]

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought by that
        feature. It is also known as the Gini importance [4]_.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        return self.tree_.compute_feature_importances()
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    max_features : int, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If "auto", then `max_features=sqrt(n_features)` on
            classification tasks and `max_features=n_features`
            on regression problems.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_density : float, optional (default=0.1)
        This parameter controls a trade-off in an optimization heuristic. It
        controls the minimum density of the `sample_mask` (i.e. the
        fraction of samples in the mask). If the density falls below this
        threshold the mask is recomputed and the input data is packed
        which results in data copying.  If `min_density` equals to one,
        the partitions are always represented as copies of the original
        data. Otherwise, partitions are represented as bit masks (aka
        sample masks).

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `classes_` : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    `n_classes_` : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    `feature_importances_` : array of shape = [n_features]
        The feature importances. The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought by that
        feature. It is also known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier

    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()

    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features=None,
                 compute_importances=False,
                 random_state=None):
        super(DecisionTreeClassifier, self).__init__(criterion,
                                                     max_depth,
                                                     min_samples_split,
                                                     min_samples_leaf,
                                                     min_density,
                                                     max_features,
                                                     compute_importances,
                                                     random_state)

    def predict_proba(self, X):
        """Predict class probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by arithmetical order.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE, order="F")

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first.")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, 0, :self.n_classes_]
            # Normalize per-leaf class counts to probabilities; replace
            # all-zero row sums by 1 to avoid dividing by zero.
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            # One probability array per output, each normalized separately.
            all_proba = []

            for k in xrange(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. Classes are
            ordered by arithmetical order.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A tree regressor.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.

    max_features : int, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If "auto", then `max_features=sqrt(n_features)` on
            classification tasks and `max_features=n_features`
            on regression problems.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_density : float, optional (default=0.1)
        This parameter controls a trade-off in an optimization heuristic. It
        controls the minimum density of the `sample_mask` (i.e. the
        fraction of samples in the mask). If the density falls below this
        threshold the mask is recomputed and the input data is packed
        which results in data copying.  If `min_density` equals to one,
        the partitions are always represented as copies of the original
        data. Otherwise, partitions are represented as bit masks (aka
        sample masks).

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `feature_importances_` : array of shape = [n_features]
        The feature importances. The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought by that
        feature. It is also known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor

    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)

    R2 scores (a.k.a. coefficient of determination) over 10-folds CV:

    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61...,  0.57..., -0.34...,  0.41...,  0.75...,
            0.07...,  0.29...,  0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features=None,
                 compute_importances=False,
                 random_state=None):
        super(DecisionTreeRegressor, self).__init__(criterion,
                                                    max_depth,
                                                    min_samples_split,
                                                    min_samples_leaf,
                                                    min_density,
                                                    max_features,
                                                    compute_importances,
                                                    random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    An extra-tree is built differently from a classic decision tree: at
    each node, one random split is drawn for each of the `max_features`
    randomly selected candidate features, and the best of those random
    splits is kept. With ``max_features=1`` the resulting tree is built
    completely at random.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features="auto",
                 compute_importances=False,
                 random_state=None):
        # Delegate parameter storage to DecisionTreeClassifier, then
        # switch the split strategy to random thresholds.
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_density=min_density,
            max_features=max_features,
            compute_importances=compute_importances,
            random_state=random_state)

        self.find_split_ = _tree.TREE_SPLIT_RANDOM
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in how they are
    built: at each node a random split is drawn for every one of the
    `max_features` randomly selected candidate features, and the best
    of those random splits is kept.  Setting `max_features` to 1 yields
    a totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeClassifier : A classifier base on extremely randomized trees
    sklearn.ensemble.ExtraTreesClassifier : An ensemble of extra-trees for
        classification
    sklearn.ensemble.ExtraTreesRegressor : An ensemble of extra-trees for
        regression

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features="auto",
                 compute_importances=False,
                 random_state=None):
        # Delegate every hyper-parameter to DecisionTreeRegressor, then
        # switch the split-search strategy to random splits.
        super(ExtraTreeRegressor, self).__init__(
            criterion, max_depth, min_samples_split, min_samples_leaf,
            min_density, max_features, compute_importances, random_state)
        self.find_split_ = _tree.TREE_SPLIT_RANDOM
|
maxlikely/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 32,046
|
[
"Brian"
] |
1f0db6681350e17dd7db2566d9a4342ee50e0150d0ea9ede930c9d6c0870e885
|
#!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Contains class for common routines for loading all BEDS data
Mike McCann
MBARI 13 May 2013
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE']='settings'
project_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) # settings.py is one dir up
import DAPloaders
from loaders import LoadScript
class BEDSLoader(LoadScript):
    '''
    Common routines for loading all BEDS data
    '''
    # Alternative brownish palette (hex RGB strings, no leading '#'),
    # keyed by lower-case platform name.  Currently unused by loadBEDS.
    brownish = {'bed01': '8c510a',
                'bed02': 'bf812d',
                'bed03': '4f812d',
              }
    # Platform colors actually passed to the DAP loaders below.
    colors = { 'bed01': 'ffeda0',
               'bed02': 'ffeda0',
               'bed03': '4feda0',
             }

    def loadBEDS(self, stride=None, featureType='trajectory'):
        '''
        BEDS specific load functions; featureType can be 'trajectory' or 'timeSeries'. Use 'trajectory' for events that we've fudged
        into a trajectory netCDF file using the canyon's thalweg. Use 'timeSeries' for events for which the BED does not significantly translate.
        '''
        stride = stride or self.stride
        # Fix: the loop variable was named `file`, shadowing the Python
        # builtin; renamed to `fname` (purely local, no interface change).
        for (aName, pName, fname, plotTimeSeriesDepth) in zip(
                [a + ' (stride=%d)' % stride for a in self.bed_files],
                self.bed_platforms, self.bed_files, self.bed_depths):
            url = self.bed_base + fname
            if featureType == 'trajectory':
                # To get timeSeries plotting for trajectories (in the Parameter tab of the UI) assign a plotTimeSeriesDepth value of the starting depth in meters.
                DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName, pName, self.colors[pName.lower()], 'bed', 'deployment',
                                               self.bed_parms, self.dbAlias, stride, plotTimeSeriesDepth=plotTimeSeriesDepth, grdTerrain=self.grdTerrain)
            elif featureType == 'timeSeries':
                DAPloaders.runTimeSeriesLoader(url, self.campaignName, self.campaignDescription, aName, pName, self.colors[pName.lower()], 'bed', 'deployment',
                                               self.bed_parms, self.dbAlias, stride)
            # Leave commented out to indicate how this would be used (X3DOM can't handle old style timestamp routing that we used to do in VRML)
            ##self.addPlaybackResources(x3dplaybackurl, aName)
            self.addPlatformResources('http://dods.mbari.org/data/beds/x3d/beds_housing_with_axes.x3d', pName)
if __name__ == '__main__':
    '''
    Test operation of this class
    '''
    # Build a loader for the test campaign and load a single BED event
    # file at full resolution (stride=1).
    cl = BEDSLoader('stoqs_beds2013', 'Test BEDS Load')
    cl.stride = 1
    cl.bed_base = 'http://odss-test.shore.mbari.org/thredds/dodsC/BEDS_2013/beds01/'
    cl.bed_files = ['BED00039.nc']
    cl.bed_parms = ['XA', 'YA', 'ZA', 'XR', 'YR', 'ZR', 'PRESS', 'BED_DEPTH']
    # NOTE(review): loadBEDS() zips over self.bed_platforms and
    # self.bed_depths, which are not set here -- confirm LoadScript
    # provides defaults, otherwise this test raises AttributeError.
    cl.loadBEDS()
|
google-code-export/stoqs
|
loaders/BEDS/__init__.py
|
Python
|
gpl-3.0
| 3,108
|
[
"NetCDF"
] |
477a46b3e8ad008bb8b083f7502d825b5de66881791ec4842031be2c4b98892c
|
"""
.. _plot_stc:
=================
04. Plot contrast
=================
Group average of dSPM solutions obtained by :ref:`plot_events_inverse` for the
contrast between both types of faces together and scrambled at 170 ms
poststimulus. The image was produced by subtracting normalized solutions of
faces to the ones of scrambled.
"""
import os
import json
import pprint
import os.path as op
import numpy as np
from mayavi import mlab
import mne
# Read experiment params as json
params = json.load(open("params.json"))
pprint.pprint({'parameters': params})
# Pull analysis settings out of the "general" section of params.json.
data_type = params["general"]["data_type"]
subject_ids = params["general"]["subject_ids"]
NJOBS = params["general"]["NJOBS"]
session_ids = params["general"]["session_ids"]
conditions = params["general"]["conditions"]
# Fall back to the user's home directory when no data_path is configured.
if "data_path" in params["general"].keys():
    data_path = params["general"]["data_path"]
else:
    data_path = op.expanduser("~")
print("data_path : %s" % data_path)
subjects_dir = op.join(data_path, params["general"]["subjects_dir"])
# TODO: if you ran 02 and 03 separately set this path
morph_stc_path = \
    op.join(data_path, 'source_dsamp_full_reconstruction_dSPM_aparc',
            '_subject_id_{sbj}', 'morph_stc')
'''
# if you ran 02-03 together set this path
morph_stc_path = \
    op.join(data_path, 'preprocessing_full_inverse/full_inv_pipeline',
            '_subject_id_{sbj}', 'morph_stc')
'''
# os.environ['ETS_TOOLKIT'] = 'qt4'
os.environ['QT_API'] = 'pyqt5'
# All figures are written into <data_path>/figures.
fig_path = op.join(data_path, 'figures')
if not os.path.isdir(fig_path):
    os.mkdir(fig_path)
# PLot
# For each condition, average |dSPM| source estimates across subjects
# into one SourceEstimate on the fsaverage brain.
stc_condition = list()
for cond in conditions:
    stcs = list()
    for subject in subject_ids:
        out_path = morph_stc_path.format(sbj=subject)
        stc = mne.read_source_estimate(
            op.join(out_path, 'mne_dSPM_inverse_morph-%s' % (cond)))
        stcs.append(stc)
    data = np.average([np.abs(s.data) for s in stcs], axis=0)
    stc = mne.SourceEstimate(data, stcs[0].vertices,
                             stcs[0].tmin, stcs[0].tstep, 'fsaverage')
    del stcs
    stc_condition.append(stc)
# Contrast: per-condition data are normalized by their own maximum,
# then conditions 0 and 2 are summed and condition 1 subtracted.
# NOTE(review): presumably 0/2 are the two face conditions and 1 is
# scrambled -- confirm the ordering of `conditions` in params.json.
data = stc_condition[0].data / np.max(stc_condition[0].data) + \
    stc_condition[2].data / np.max(stc_condition[2].data) - \
    stc_condition[1].data / np.max(stc_condition[1].data)
data = np.abs(data)
stc_contrast = mne.SourceEstimate(
    data, stc_condition[0].vertices, stc_condition[0].tmin,
    stc_condition[0].tstep, 'fsaverage')
# stc_contrast.save(op.join(fig_path, 'stc_dspm_difference_norm'))
# Explicit color limits (the contrast values were normalized above).
lims = (0.25, 0.75, 1)
clim = dict(kind='value', lims=lims)
# TODO use auto with py39
brain_dspm = stc_contrast.plot(
    views=['ven'], hemi='both', subject='fsaverage', subjects_dir=subjects_dir,
    initial_time=0.17, time_unit='s', background='w',
    clim=clim, foreground='k', backend='mayavi')
# Orient the ventral view, then snapshot the figure to disk.
mlab.view(90, 180, roll=180, focalpoint=(0., 15., 0.), distance=500)
brain_dspm.save_image(op.join(fig_path, 'dspm-contrast'))
|
neuropycon/ephypype
|
doc/workshop/meg/04-plot_stc.py
|
Python
|
bsd-3-clause
| 2,920
|
[
"Mayavi"
] |
eafadf5f1eb33e2fc7be7b8ca2dace908aee99446f7353f5618bd9f4fa9bd136
|
# -*- coding: utf-8 -*-
""" ymir.util._ansible
"""
import os
from fabric import api
from peak.util.imports import lazyModule
from ymir import data as ydata
from ymir.base import report as base_report
yapi = lazyModule('ymir.api')
def require_ansible_role(role_name, role_dir, report=base_report):
    """Ensure the ansible role `role_name` exists under `role_dir`,
    installing it with ansible-galaxy when it is missing.

    Raises RuntimeError if the galaxy install command fails.
    """
    present = os.listdir(role_dir)
    if role_name not in present:
        report(ydata.FAIL +
               "role '{0}' not found in {1}".format(role_name, role_dir))
        galaxy_cmd = 'ansible-galaxy install -p {role_dir} {role_name}'.format(
            role_dir=role_dir, role_name=role_name)
        result = api.local(galaxy_cmd)
        if not result.succeeded:
            raise RuntimeError(
                "missing role {0} could not be installed".format(role_name))
    report(
        ydata.SUCCESS +
        "ansible role '{0}' installed to '{1}'".format(role_name, role_dir))
|
mattvonrocketstein/ymir
|
ymir/util/_ansible.py
|
Python
|
mit
| 878
|
[
"Galaxy"
] |
176689e001d365f88b9ffe5afdcd1a243c2b280e0ed8e47ffef47a0f0b043066
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384')
from data_384 import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
    """Split `fvec` row-wise into 40 equal chunks and return per-chunk
    statistics (used as Gaussian emission parameters of the HMM states).

    Parameters
    ----------
    fvec : array-like, shape (m, n)
        Feature matrix; only the first 40 * (m // 40) rows are used.

    Returns
    -------
    (mu, sigma) : pair of np.matrix, each of shape (40, 1)
        Mean and standard deviation of every chunk (over all columns).
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((40, 1)))
    sigma = np.matrix(np.zeros((40, 1)))
    # Fix: use floor division.  Under Python 2 `m/40` was already integer
    # division, but under Python 3 it yields a float and the slicing
    # below would raise TypeError.  `//` keeps both behaviours identical.
    divs = m // 40
    for index in range(40):
        m_init = index * divs
        chunk = fvec[m_init:(m_init + divs), 0:]
        # np.mean/np.std are the same computation as the long-removed
        # scipy.mean/scipy.std aliases the original called via `scp`.
        mu[index] = np.mean(chunk)
        sigma[index] = np.std(chunk)
    return mu, sigma
# Returns sequence given raw data
def create_seq(fvec):
    """Compress each column of `fvec` into a 40-element observation
    sequence by averaging consecutive row chunks.

    Parameters
    ----------
    fvec : array-like, shape (m, n)
        Raw data; each of the n columns is one trial.  Only the first
        40 * (m // 40) rows of each column are used.

    Returns
    -------
    np.matrix, shape (40, n)
        Chunk means, one 40-step sequence per column.
    """
    m, n = np.shape(fvec)
    seq = np.matrix(np.zeros((40, n)))
    # Fix: floor division -- `m/40` becomes a float under Python 3 and
    # breaks the slicing below; `//` matches the Python 2 behaviour.
    divs = m // 40
    for i in range(n):
        for index in range(40):
            m_init = index * divs
            chunk = fvec[m_init:(m_init + divs), i]
            seq[index, i] = np.mean(chunk)
    return seq
if __name__ == '__main__':
    # Pre-computed feature matrix loaded from data_384 above.
    Fmat = Fmat_original
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    #print " "
    #print 'Total_Matrix_Shape:',m_tot,n_tot
    # Emission parameters per class.  Columns 0:35 = Rigid-Fixed (RF),
    # 35:70 = Rigid-Movable (RM), 70:105 = Soft-Fixed (SF),
    # 105:140 = Soft-Movable (SM) -- presumably 35 trials per class;
    # TODO confirm against the data_384 layout.
    mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[0:121,0:35])
    mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[0:121,35:70])
    mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[0:121,70:105])
    mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[0:121,105:140])
    mu_obj1,sigma_obj1 = feature_to_mu_sigma(Fmat[0:121,140:141])
    mu_obj2,sigma_obj2 = feature_to_mu_sigma(Fmat[0:121,141:142])
    #print [mu_rf, sigma_rf]
    # HMM - Implementation:
    # NOTE(review): the comment below says 10 hidden states, but the
    # code in this file builds 40x40 matrices -- 40 states are used.
    # 10 Hidden States
    # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..
    F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.1, 0.05, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.1, 0.25, 0.2, 0.15, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.1, 0.25, 0.2, 0.15, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.20, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.20, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.20, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.10, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.10, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.15, 0.1, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.15, 0.1, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.20, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.20, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.10, 0.10, 0.04, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.10, 0.10, 0.05, 0.03, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.10, 0.10, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.10, 0.10, 0.10, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.10, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.30, 0.60, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.10, 0.80, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.00, 0.10, 0.80, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.00, 0.10, 0.80, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.06, 0.01, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.07, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.08, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.09, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.30, 0.70, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.10, 0.80, 0.10, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.10, 0.80, 0.10, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10, 0.00],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.10, 0.80, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.80],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 1.00]]
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf = np.zeros((40,2))
    B_rm = np.zeros((40,2))
    B_sf = np.zeros((40,2))
    B_sm = np.zeros((40,2))
    # One (mu, sigma) Gaussian per hidden state, taken from the
    # per-class chunk statistics computed above.
    for num_states in range(40):
        B_rf[num_states,0] = mu_rf[num_states]
        B_rf[num_states,1] = sigma_rf[num_states]
        B_rm[num_states,0] = mu_rm[num_states]
        B_rm[num_states,1] = sigma_rm[num_states]
        B_sf[num_states,0] = mu_sf[num_states]
        B_sf[num_states,1] = sigma_sf[num_states]
        B_sm[num_states,0] = mu_sm[num_states]
        B_sm[num_states,1] = sigma_sm[num_states]
    # ghmm expects plain Python lists, not numpy arrays.
    B_rf = B_rf.tolist()
    B_rm = B_rm.tolist()
    B_sf = B_sf.tolist()
    B_sm = B_sm.tolist()
    # pi - initial probabilities per state (uniform: 40 * 0.025 == 1.0)
    pi = [0.025] * 40
    # generate RF, RM, SF, SM models from parameters
    model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
    model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
    model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
    model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
while (trial_number < 6):
# For Training
total_seq = Fmat[0:121,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[0:121,1:5]
total_seq_rm = total_seq[0:121,36:40]
total_seq_sf = total_seq[0:121,71:75]
total_seq_sm = total_seq[0:121,106:110]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+1:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+36:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+71:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+106:j+110]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = np.column_stack((total_seq[0:121,0],total_seq[0:121,2:5]))
total_seq_rm = np.column_stack((total_seq[0:121,35],total_seq[0:121,37:40]))
total_seq_sf = np.column_stack((total_seq[0:121,70],total_seq[0:121,72:75]))
total_seq_sm = np.column_stack((total_seq[0:121,105],total_seq[0:121,107:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0],total_seq[0:121,j+2:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35],total_seq[0:121,j+37:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70],total_seq[0:121,j+72:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105],total_seq[0:121,j+107:j+110]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = np.column_stack((total_seq[0:121,0:2],total_seq[0:121,3:5]))
total_seq_rm = np.column_stack((total_seq[0:121,35:37],total_seq[0:121,38:40]))
total_seq_sf = np.column_stack((total_seq[0:121,70:72],total_seq[0:121,73:75]))
total_seq_sm = np.column_stack((total_seq[0:121,105:107],total_seq[0:121,108:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+2],total_seq[0:121,j+3:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+37],total_seq[0:121,j+38:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+72],total_seq[0:121,j+73:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+107],total_seq[0:121,j+108:j+110]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = np.column_stack((total_seq[0:121,0:3],total_seq[0:121,4:5]))
total_seq_rm = np.column_stack((total_seq[0:121,35:38],total_seq[0:121,39:40]))
total_seq_sf = np.column_stack((total_seq[0:121,70:73],total_seq[0:121,74:75]))
total_seq_sm = np.column_stack((total_seq[0:121,105:108],total_seq[0:121,109:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+3],total_seq[0:121,j+4:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+38],total_seq[0:121,j+39:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+73],total_seq[0:121,j+74:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+108],total_seq[0:121,j+109:j+110]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:121,0:4]
total_seq_rm = total_seq[0:121,35:39]
total_seq_sf = total_seq[0:121,70:74]
total_seq_sm = total_seq[0:121,105:109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+109]))
j = j+5
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[0:121,0]
total_seq_rm = total_seq[0:121,35]
total_seq_sf = total_seq[0:121,70]
total_seq_sm = total_seq[0:121,105]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = total_seq[0:121,1]
total_seq_rm = total_seq[0:121,36]
total_seq_sf = total_seq[0:121,71]
total_seq_sm = total_seq[0:121,106]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+1]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+36]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+71]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+106]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[0:121,2]
total_seq_rm = total_seq[0:121,37]
total_seq_sf = total_seq[0:121,72]
total_seq_sm = total_seq[0:121,107]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+2]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+37]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+72]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+107]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[0:121,3]
total_seq_rm = total_seq[0:121,38]
total_seq_sf = total_seq[0:121,73]
total_seq_sm = total_seq[0:121,108]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+3]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+38]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+73]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+108]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:121,4]
total_seq_rm = total_seq[0:121,39]
total_seq_sf = total_seq[0:121,74]
total_seq_sm = total_seq[0:121,109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+109]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[0:121,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
path_rm_obj = model_rm.viterbi(final_ts_obj)
path_sf_obj = model_sf.viterbi(final_ts_obj)
path_sm_obj = model_sm.viterbi(final_ts_obj)
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
trial_number = trial_number + 1
#print rf_final
#print rm_final
#print sf_final
#print sm_final
    # Confusion Matrix
    # rf_final/rm_final/sf_final/sm_final hold, per test column, how
    # many of the 5 cross-validation folds voted for each class.
    cmat = np.zeros((4,4))
    arrsum_rf = np.zeros((4,1))
    arrsum_rm = np.zeros((4,1))
    arrsum_sf = np.zeros((4,1))
    arrsum_sm = np.zeros((4,1))
    # Sum vote counts in groups of 7 columns -- presumably 7 test
    # objects per true class (RF, RM, SF, SM); TODO confirm against
    # the data layout (28 = 4 classes x 7).
    k = 7
    i = 0
    while (k < 29):
        arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
        arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
        arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
        arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
        i = i+1
        k = k+7
    # Fill cmat: row i = predicted class (RF, RM, SF, SM), column j =
    # true-class group.
    i=0
    while (i < 4):
        j=0
        while (j < 4):
            if (i == 0):
                cmat[i][j] = arrsum_rf[j]
            elif (i == 1):
                cmat[i][j] = arrsum_rm[j]
            elif (i == 2):
                cmat[i][j] = arrsum_sf[j]
            else:
                cmat[i][j] = arrsum_sm[j]
            j = j+1
        i = i+1
    #print cmat
    # Plot Confusion Matrix
    Nlabels = 4
    fig = pp.figure()
    ax = fig.add_subplot(111)
    figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
    ax.set_title('Performance of HMM Models')
    pp.xlabel("Targets")
    pp.ylabel("Predictions")
    ax.set_xticks([0.5,1.5,2.5,3.5])
    ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    ax.set_yticks([3.5,2.5,1.5,0.5])
    ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    figbar = fig.colorbar(figplot)
    # Annotate every cell with its raw count.
    i = 0
    while (i < 4):
        j = 0
        while (j < 4):
            pp.text(j+0.5,3.5-i,cmat[i][j])
            j = j+1
        i = i+1
    pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/force_codes/number_of_states/hmm_crossvalidation_force_40_states.py
|
Python
|
mit
| 25,353
|
[
"Gaussian",
"Mayavi"
] |
6b11cd73d2da70b071c7bfb2d57f5f31678f152aee768e78a06a3dd73a8c69b4
|
#
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
class NeighbourListBase(object):
    """Interface for the neighbour list.

    mcfm module can use any neighbour list object as long
    as it provides the implementation of the two routines below.
    """

    def update(self, atoms):
        """Make sure the list is up to date. If called for the first
        time, build the list.

        Parameters
        ----------
        atoms : ase.Atoms
            atoms to initialize the list from

        Returns
        -------
        bool
            True if the update was successful
        """
        raise NotImplementedError("Must implement this function!")

    def get_neighbours(self, a):
        """Return neighbours of atom number a.

        A list of indices to neighbouring atoms is returned.

        Parameters
        ----------
        a : int
            atomic index

        Returns
        -------
        np.array
            array of neighbouring indices
        """
        raise NotImplementedError("Must implement this function!")
|
libAtoms/matscipy
|
matscipy/calculators/mcfm/neighbour_list_mcfm/neighbour_list_base.py
|
Python
|
lgpl-2.1
| 1,886
|
[
"ASE",
"Matscipy"
] |
675078ecf81fd621ca5714dbf91738290bcd3e3c49c0fd7acd42af8046563624
|
import os
import gc
import sys
import time
import signal
import traceback
import numpy as np
from gpaw.atom.generator import Generator
from gpaw.atom.configurations import parameters
from gpaw.utilities import devnull, compiled_with_sl
from gpaw import setup_paths
from gpaw import mpi
import gpaw
def equal(x, y, tolerance=0, fail=True, msg=''):
    """Compare x and y within tolerance.

    Parameters
    ----------
    x, y : scalar or ndarray
        Values to compare (must be broadcastable against each other).
    tolerance : float
        Maximum allowed absolute difference.
    fail : bool
        If true, raise AssertionError on mismatch; otherwise only write
        a warning to stderr.
    msg : str
        Prefix for the error/warning message.

    Raises
    ------
    AssertionError
        If any element differs by more than *tolerance* (or is NaN/inf)
        and *fail* is true.
    """
    # A NaN/inf anywhere in the difference must count as a mismatch.
    # The previous check used isfinite(...).any(), which for array input
    # only fired when *every* element was non-finite, so a single NaN
    # slipped through silently.
    if not np.isfinite(x - y).all() or (np.abs(x - y) > tolerance).any():
        msg = (msg + '%s != %s (error: |%s| > %.9g)' %
               (x, y, x - y, tolerance))
        if fail:
            raise AssertionError(msg)
        else:
            sys.stderr.write('WARNING: %s\n' % msg)
def findpeak(x, y):
    """Locate the maximum of the sampled function y(x).

    Fits a parabola through the largest sample and its two neighbours
    and returns ``(position, value)`` of the interpolated peak.  The
    grid *x* is assumed uniformly spaced; *y* must be a numpy array
    with the maximum away from either end.
    """
    spacing = x[1] - x[0]
    imax = y.argmax()
    # Quadratic fit on the local coordinate [-1, 0, 1] around the maximum.
    a, b, c = np.polyfit([-1, 0, 1], y[imax - 1:imax + 2], 2)
    assert a < 0  # parabola must open downwards, otherwise no peak
    offset = -0.5 * b / a  # vertex of a*t^2 + b*t + c
    peak_position = spacing * (imax + offset)
    peak_value = (a * offset + b) * offset + c
    return peak_position, peak_value
def gen(symbol, exx=False, name=None, **kwargs):
    """Generate a PAW setup for *symbol* on the master rank.

    Extra keyword arguments are forwarded to Generator; scalar-relativistic
    corrections are enabled unless the caller says otherwise.  After the
    barrier, the current directory is pushed onto the setup search path so
    the freshly generated setup is found first.
    """
    if mpi.rank == 0:
        kwargs.setdefault('scalarrel', True)
        generator = Generator(symbol, **kwargs)
        generator.run(exx=exx, name=name, use_restart_file=False,
                      **parameters[symbol])
    mpi.world.barrier()  # all ranks wait until the setup file exists
    if setup_paths[0] != '.':
        setup_paths.insert(0, '.')
def wrap_pylab(names=None):
    """Use the Agg backend and prevent windows from popping up.

    Replaces ``pylab.show`` with a stub that saves the current figure
    instead of displaying it, consuming one filename from *names* per
    call and falling back to 'fig.png' when the list is exhausted.

    Parameters
    ----------
    names : list of str, optional
        Filenames to use for successive figures (consumed in order).
    """
    # A fresh list per call replaces the original mutable default
    # argument (names=[]), which was silently shared between calls.
    if names is None:
        names = []
    import matplotlib
    matplotlib.use('Agg')
    import pylab

    def show(names=names):
        if names:
            name = names.pop(0)
        else:
            name = 'fig.png'
        pylab.savefig(name)

    pylab.show = show
tests = [
'gemm_complex.py',
'mpicomm.py',
'ase3k_version.py',
'numpy_core_multiarray_dot.py',
'eigh.py',
'lapack.py',
'dot.py',
'lxc_fxc.py',
'blas.py',
'erf.py',
'gp2.py',
'kptpar.py',
'non_periodic.py',
'parallel/blacsdist.py',
'gradient.py',
'cg2.py',
'kpt.py',
'lf.py',
'gd.py',
'parallel/compare.py',
'pbe_pw91.py',
'fsbt.py',
'derivatives.py',
'Gauss.py',
'second_derivative.py',
'integral4.py',
'parallel/ut_parallel.py',
'transformations.py',
'parallel/parallel_eigh.py',
'spectrum.py',
'xc.py',
'zher.py',
'pbc.py',
'lebedev.py',
'parallel/ut_hsblacs.py',
'parallel/submatrix_redist.py',
'occupations.py',
'dump_chi0.py',
'cluster.py',
'pw/interpol.py',
'poisson.py',
'pw/lfc.py',
'pw/reallfc.py',
'XC2.py',
'multipoletest.py',
'nabla.py',
'noncollinear/xccorr.py',
'gauss_wave.py',
'harmonic.py',
'atoms_too_close.py',
'screened_poisson.py',
'yukawa_radial.py',
'noncollinear/xcgrid3d.py',
'vdwradii.py',
'lcao_restart.py',
'ase3k.py',
'parallel/ut_kptops.py',
'fileio/idiotproof_setup.py',
'fileio/hdf5_simple.py',
'fileio/hdf5_noncontiguous.py',
'fileio/parallel.py',
'timing.py',
'coulomb.py',
'xcatom.py',
'maxrss.py',
'proton.py',
'pw/moleculecg.py',
'keep_htpsit.py',
'pw/stresstest.py',
'aeatom.py',
'numpy_zdotc_graphite.py',
'lcao_density.py',
'parallel/overlap.py',
'restart.py',
# numpy/scipy tests fail randomly
#'numpy_test.py',
#'scipy_test.py',
'gemv.py',
'ylexpand.py',
'potential.py',
'wfs_io.py',
'fixocc.py',
'nonselfconsistentLDA.py',
'gga_atom.py',
'ds_beta.py',
'gauss_func.py',
'noncollinear/h.py',
'symmetry.py',
'symmetry_ft.py',
'usesymm.py',
'broydenmixer.py',
'mixer.py',
'pes.py',
'wfs_auto.py',
'ewald.py',
'refine.py',
'revPBE.py',
'nonselfconsistent.py',
'hydrogen.py',
'fileio/file_reference.py',
'fixdensity.py',
'bee1.py',
'spinFe3plus.py',
'pw/h.py',
'pw/fulldiag.py',
'pw/fulldiagk.py',
'stdout.py',
'parallel/lcao_complicated.py',
'pw/slab.py',
'spinpol.py',
'plt.py',
'lcao_pair_and_coulomb.py',
'eed.py',
'lrtddft2.py',
'parallel/hamiltonian.py',
'pseudopotential/ah.py',
'laplace.py',
'pw/mgo_hybrids.py',
'lcao_largecellforce.py',
'restart2.py',
'Cl_minus.py',
'fileio/restart_density.py',
'external_potential.py',
'pw/bulk.py',
'pw/fftmixer.py',
'mgga_restart.py',
'vdw/quick.py',
'multipoleH2O.py',
'bulk.py',
'elf.py',
'aluminum_EELS_RPA.py',
'aluminum_EELS_ALDA.py',
'H_force.py',
'parallel/lcao_hamiltonian.py',
'fermisplit.py',
'parallel/ut_redist.py',
'lcao_h2o.py',
'cmrtest/cmr_test2.py',
'h2o_xas.py',
'ne_gllb.py',
'exx_acdf.py',
'asewannier.py',
'exx_q.py',
'ut_rsh.py',
'ut_csh.py',
'spin_contamination.py',
'davidson.py',
'partitioning.py',
'pw/davidson_pw.py',
'cg.py',
'gllbatomic.py',
'lcao_force.py',
'neb.py',
'fermilevel.py',
'h2o_xas_recursion.py',
'diamond_eps.py',
'excited_state.py',
# > 20 sec tests start here (add tests after gemm.py!)
'gemm.py',
'fractional_translations.py',
'rpa_energy_Ni.py',
'LDA_unstable.py',
'si.py',
'blocked_rmm_diis.py',
'lxc_xcatom.py',
'gw_planewave.py',
'degeneracy.py',
'apmb.py',
'vdw/potential.py',
'al_chain.py',
'relax.py',
'fixmom.py',
'CH4.py',
'diamond_absorption.py',
'simple_stm.py',
'gw_method.py',
'lcao_bulk.py',
'constant_electric_field.py',
'parallel/ut_invops.py',
'wannier_ethylene.py',
'parallel/lcao_projections.py',
'guc_force.py',
'test_ibzqpt.py',
'aedensity.py',
'fd2lcao_restart.py',
'gwsi.py',
#'graphene_EELS.py', disabled while work is in progress on response code
'lcao_bsse.py',
'pplda.py',
'revPBE_Li.py',
'si_primitive.py',
'complex.py',
'Hubbard_U.py',
'ldos.py',
'parallel/ut_hsops.py',
'pw/hyb.py',
'pseudopotential/hgh_h2o.py',
'vdw/quick_spin.py',
'scfsic_h2.py',
'lrtddft.py',
'dscf_lcao.py',
'IP_oxygen.py',
'Al2_lrtddft.py',
'rpa_energy_Si.py',
'2Al.py',
'tpss.py',
'be_nltd_ip.py',
'si_xas.py',
'atomize.py',
'chi0.py',
'ralda_energy_H2.py',
'ralda_energy_N2.py',
'ralda_energy_Ni.py',
'ralda_energy_Si.py',
'Cu.py',
'restart_band_structure.py',
'ne_disc.py',
'exx_coarse.py',
'exx_unocc.py',
'Hubbard_U_Zn.py',
'muffintinpot.py',
'diamond_gllb.py',
'h2o_dks.py',
'gw_ppa.py',
'nscfsic.py',
'gw_static.py',
# > 100 sec tests start here (add tests after exx.py!)
'response_na_plasmon.py',
'exx.py',
'pygga.py',
'dipole.py',
'nsc_MGGA.py',
'mgga_sc.py',
'MgO_exx_fd_vs_pw.py',
'lb94.py',
'8Si.py',
'td_na2.py',
'ehrenfest_nacl.py',
'rpa_energy_N2.py',
'beefvdw.py',
#'mbeef.py',
'nonlocalset.py',
'wannierk.py',
'rpa_energy_Na.py',
'coreeig.py',
'pw/si_stress.py',
'ut_tddft.py',
'transport.py',
'vdw/ar2.py',
'bse_sym.py',
'aluminum_testcell.py',
'au02_absorption.py',
'lrtddft3.py',
'scfsic_n2.py',
'fractional_translations_big.py',
'parallel/lcao_parallel.py',
'parallel/lcao_parallel_kpt.py',
'parallel/fd_parallel.py',
'parallel/fd_parallel_kpt.py',
'bse_aluminum.py',
'bse_diamond.py',
'bse_vs_lrtddft.py',
'bse_silicon.py',
'bse_MoS2_cut.py',
'parallel/pblas.py',
'parallel/scalapack.py',
'parallel/scalapack_diag_simple.py',
'parallel/scalapack_mpirecv_crash.py',
'parallel/realspace_blacs.py',
'AA_exx_enthalpy.py',
#'usesymm2.py',
#'eigh_perf.py', # Requires LAPACK 3.2.1 or later
# XXX https://trac.fysik.dtu.dk/projects/gpaw/ticket/230
#'parallel/scalapack_pdlasrt_hang.py',
#'dscf_forces.py',
#'stark_shift.py',
'cmrtest/cmr_test.py',
'cmrtest/cmr_test3.py',
'cmrtest/cmr_test4.py',
'cmrtest/cmr_append.py',
'cmrtest/Li2_atomize.py']
# Tests that cannot run in the current environment are filtered out of
# the global list below, based on OS, MPI world size, ScaLAPACK support
# and library versions.
exclude = []

# not available on Windows
if os.name in ['ce', 'nt']:
    exclude += ['maxrss.py']

# serial-only tests (or tests known to misbehave under MPI)
if mpi.size > 1:
    exclude += ['maxrss.py',
                'pes.py',
                'diamond_eps.py',
                'nscfsic.py',
                'coreeig.py',
                'asewannier.py',
                'wannier_ethylene.py',
                'muffintinpot.py',
                'stark_shift.py',
                'exx_q.py',
                'potential.py',
                #'cmrtest/cmr_test3.py',
                #'cmrtest/cmr_append.py',
                'cmrtest/Li2_atomize.py',  # started to hang May 2014
                'lcao_pair_and_coulomb.py',
                'bse_MoS2_cut.py',
                'pw/moleculecg.py',
                'pw/davidson_pw.py',
                # scipy.weave fails often in parallel due to
                # ~/.python*_compiled
                # https://github.com/scipy/scipy/issues/1895
                'scipy_test.py']

if mpi.size > 2:
    exclude += ['neb.py']

# tests that require at least 4 MPI ranks (BLACS/ScaLAPACK layouts)
if mpi.size < 4:
    exclude += ['parallel/pblas.py',
                'parallel/scalapack.py',
                'parallel/scalapack_diag_simple.py',
                'parallel/realspace_blacs.py',
                'AA_exx_enthalpy.py',
                'bse_aluminum.py',
                'bse_diamond.py',
                'bse_silicon.py',
                'bse_vs_lrtddft.py',
                'fileio/parallel.py']

# tests that require exactly 4 ranks
if mpi.size != 4:
    exclude += ['parallel/lcao_parallel.py']
    exclude += ['parallel/fd_parallel.py']
    exclude += ['parallel/scalapack_mpirecv_crash.py']
    exclude += ['parallel/scalapack_pdlasrt_hang.py']

if mpi.size == 1 or not compiled_with_sl():
    exclude += ['parallel/submatrix_redist.py']

# parallel runs without ScaLAPACK cannot do these
if mpi.size != 1 and not compiled_with_sl():
    exclude += ['ralda_energy_H2.py',
                'ralda_energy_N2.py',
                'ralda_energy_Ni.py',
                'ralda_energy_Si.py',
                'bse_sym.py',
                'bse_silicon.py',
                'gwsi.py',
                'rpa_energy_N2.py',
                'pw/fulldiag.py',
                'pw/fulldiagk.py',
                'au02_absorption.py']

if mpi.size == 8:
    exclude += ['transport.py']

# tests that require exactly 8 ranks
if mpi.size != 8:
    exclude += ['parallel/lcao_parallel_kpt.py']
    exclude += ['parallel/fd_parallel_kpt.py']

if sys.version_info < (2, 6):
    exclude.append('transport.py')

if np.__version__ < '1.6.0':
    exclude.append('chi0.py')

# remove every excluded test that is actually present in the suite
for test in exclude:
    if test in tests:
        tests.remove(test)
class TestRunner:
    """Run a list of GPAW test scripts, sequentially or in forked workers.

    With jobs == 1 the tests run in-process (required under MPI); with
    jobs > 1 the runner forks one child process per test, up to *jobs*
    concurrent children.  Results are written to *stream* on rank 0.
    NOTE: this is Python 2 code (execfile, old except syntax).
    """

    def __init__(self, tests, stream=sys.__stdout__, jobs=1,
                 show_output=False):
        if mpi.size > 1:
            # os.fork cannot be combined with MPI; force sequential mode.
            assert jobs == 1
        self.jobs = jobs
        self.show_output = show_output
        self.tests = tests
        self.failed = []
        self.skipped = []
        self.garbage = []
        # Only rank 0 writes the log; other ranks discard output.
        if mpi.rank == 0:
            self.log = stream
        else:
            self.log = devnull
        # Column width for aligned test names in the log.
        self.n = max([len(test) for test in tests])

    def run(self):
        """Run all tests and return the list of failed test names."""
        self.log.write('=' * 77 + '\n')
        if not self.show_output:
            # Silence stdout of the individual tests.
            sys.stdout = devnull
        ntests = len(self.tests)
        t0 = time.time()
        if self.jobs == 1:
            self.run_single()
        else:
            # Run several processes using fork:
            self.run_forked()
        sys.stdout = sys.__stdout__
        self.log.write('=' * 77 + '\n')
        self.log.write('Ran %d tests out of %d in %.1f seconds\n' %
                       (ntests - len(self.tests) - len(self.skipped),
                        ntests, time.time() - t0))
        self.log.write('Tests skipped: %d\n' % len(self.skipped))
        if self.failed:
            self.log.write('Tests failed: %d\n' % len(self.failed))
        else:
            self.log.write('All tests passed!\n')
        self.log.write('=' * 77 + '\n')
        return self.failed

    def run_single(self):
        """Run tests one after another in this process."""
        while self.tests:
            test = self.tests.pop(0)
            try:
                self.run_one(test)
            except KeyboardInterrupt:
                # Put the interrupted test back so the summary is accurate.
                self.tests.append(test)
                break

    def run_forked(self):
        """Run tests in up to self.jobs forked child processes."""
        j = 0  # number of currently running children
        pids = {}  # pid -> test name
        while self.tests or j > 0:
            if self.tests and j < self.jobs:
                test = self.tests.pop(0)
                pid = os.fork()
                if pid == 0:
                    # Child: run one test and report via the exit code.
                    exitcode = self.run_one(test)
                    os._exit(exitcode)
                else:
                    j += 1
                    pids[pid] = test
            else:
                try:
                    # Reap children until one of ours finishes.
                    while True:
                        pid, exitcode = os.wait()
                        if pid in pids:
                            break
                except KeyboardInterrupt:
                    # Stop all running children and re-queue their tests.
                    for pid, test in pids.items():
                        os.kill(pid, signal.SIGHUP)
                        self.write_result(test, 'STOPPED', time.time())
                        self.tests.append(test)
                    break
                # os.wait() status: high byte is the exit code, so
                # 512 == exit(2) (failed) and 256 == exit(1) (skipped).
                if exitcode == 512:
                    self.failed.append(pids[pid])
                elif exitcode == 256:
                    self.skipped.append(pids[pid])
                del pids[pid]
                j -= 1

    def run_one(self, test):
        """Execute a single test script; return 0 (OK), 1 (skip), 2 (fail)."""
        if self.jobs == 1:
            self.log.write('%*s' % (-self.n, test))
            self.log.flush()
        t0 = time.time()
        filename = gpaw.__path__[0] + '/test/' + test
        failed = False
        skip = False
        try:
            loc = {}
            execfile(filename, loc)
            # Drop references from the test's namespace before the
            # garbage check, so leaked objects are really unreachable.
            loc.clear()
            del loc
            self.check_garbage()
        except KeyboardInterrupt:
            self.write_result(test, 'STOPPED', t0)
            raise
        except ImportError, ex:
            # Missing optional dependencies cause a skip, not a failure.
            module = ex.args[0].split()[-1].split('.')[0]
            if module in ['scipy', 'cmr', '_gpaw_hdf5']:
                skip = True
            else:
                failed = True
        except Exception:
            failed = True
        mpi.ibarrier(timeout=60.0)  # guard against parallel hangs
        # Agree on a global verdict across all MPI ranks.
        me = np.array(failed)
        everybody = np.empty(mpi.size, bool)
        mpi.world.all_gather(me, everybody)
        failed = everybody.any()
        skip = mpi.world.sum(int(skip))
        if failed:
            self.fail(test, np.argwhere(everybody).ravel(), t0)
            exitcode = 2
        elif skip:
            self.write_result(test, 'SKIPPED', t0)
            self.skipped.append(test)
            exitcode = 1
        else:
            self.write_result(test, 'OK', t0)
            exitcode = 0
        return exitcode

    def check_garbage(self):
        """Fail if the last test left uncollectable (leaked) objects."""
        gc.collect()
        n = len(gc.garbage)
        self.garbage += gc.garbage
        # Reset gc.garbage so the next test starts clean.
        del gc.garbage[:]
        assert n == 0, ('Leak: Uncollectable garbage (%d object%s) %s' %
                        (n, 's'[:n > 1], self.garbage))

    def fail(self, test, ranks, t0):
        """Record a failure, collecting tracebacks from all failing ranks."""
        if mpi.rank in ranks:
            if sys.version_info >= (2, 4, 0, 'final', 0):
                tb = traceback.format_exc()
            else:  # Python 2.3! XXX
                tb = ''
                traceback.print_exc()
        else:
            tb = ''
        if mpi.size == 1:
            text = 'FAILED!\n%s\n%s%s' % ('#' * 77, tb, '#' * 77)
            self.write_result(test, text, t0)
        else:
            # Gather tracebacks on rank 0, grouping identical ones so the
            # log shows each distinct traceback once with its rank list.
            tbs = {tb: [0]}
            for r in range(1, mpi.size):
                if mpi.rank == r:
                    mpi.send_string(tb, 0)
                elif mpi.rank == 0:
                    tb = mpi.receive_string(r)
                    if tb in tbs:
                        tbs[tb].append(r)
                    else:
                        tbs[tb] = [r]
            if mpi.rank == 0:
                text = ('FAILED! (rank %s)\n%s' %
                        (','.join([str(r) for r in ranks]), '#' * 77))
                for tb, ranks in tbs.items():
                    if tb:
                        text += ('\nRANK %s:\n' %
                                 ','.join([str(r) for r in ranks]))
                        text += '%s%s' % (tb, '#' * 77)
                self.write_result(test, text, t0)
        self.failed.append(test)

    def write_result(self, test, text, t0):
        """Write one log line: test name (in parallel mode), time, result."""
        t = time.time() - t0
        if self.jobs > 1:
            # In forked mode the name was not written before the run.
            self.log.write('%*s' % (-self.n, test))
        self.log.write('%10.3f %s\n' % (t, text))
if __name__ == '__main__':
    # Run the whole suite sequentially when invoked as a script.
    TestRunner(tests).run()
|
robwarm/gpaw-symm
|
gpaw/test/__init__.py
|
Python
|
gpl-3.0
| 16,507
|
[
"GPAW"
] |
e7e2bc2fc3cb108773311f16fe1753b503912e79e0373204df5eb95722b34448
|
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2021 Tobias Gruetzmacher
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class GoComics(_ParserScraper):
url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
    def __init__(self, name, path, lang=None):
        """Set up a scraper for the strip hosted at gocomics.com/<path>.

        name is the module name (registered as 'GoComics/<name>');
        lang is an optional language code for non-English strips.
        """
        super(GoComics, self).__init__('GoComics/' + name)
        # Be polite to the host: wait 1-2 seconds between requests.
        self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
        self.url = 'https://www.gocomics.com/' + path
        self.shortname = name
        if lang:
            self.lang = lang
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
    def shouldSkipUrl(self, url, data):
        """Skip pages without images.

        Returns a truthy value (the list of matching nodes) when the page
        shows GoComics' 'content missing' placeholder image.
        """
        return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: Allowed to be long
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('AmericanChopSuey', 'american-chop-suey'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskACat', 'ask-a-cat'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyTrump', 'baby-trump'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('BadReporter', 'badreporter'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('BushyTales', 'bushy-tales'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheapThrillsCuisine', 'cheap-thrills-cuisine'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DanWasserman', 'danwasserman'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Gaturro', 'gaturro', 'es'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('GnomeSyndicate', 'gnome-syndicate'),
cls('Goats', 'goats'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HUBRIS', 'hubris'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InspectorDangersCrimeQuiz', 'inspector-dangers-crime-quiz'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JimsJournal', 'jimsjournal'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JoeVanilla', 'joevanilla'),
cls('JoeyAlisonSayersComics', 'joey-alison-sayers-comics'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'),
cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('KenCatalino', 'kencatalino'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'),
cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', 'lioespanol', 'es'),
cls('LisaBenson', 'lisabenson'),
cls('LittleDogLost', 'littledoglost'),
cls('LittleFriedChickenAndSushi', 'little-fried-chicken-and-sushi'),
cls('LittleNemo', 'little-nemo'),
cls('LizClimoCartoons', 'liz-climo-cartoons'),
cls('Lola', 'lola'),
cls('LolaEnEspanol', 'lola-en-espanol', 'es'),
cls('LongStoryShort', 'long-story-short'),
cls('LooksGoodOnPaper', 'looks-good-on-paper'),
cls('LooseParts', 'looseparts'),
cls('LosOsorios', 'los-osorios', 'es'),
cls('LostSheep', 'lostsheep'),
cls('Luann', 'luann'),
cls('LuannAgainn', 'luann-againn'),
cls('LuannEnEspanol', 'luannspanish', 'es'),
cls('LuckyCow', 'luckycow'),
cls('LugNuts', 'lug-nuts'),
cls('Lunarbaboon', 'lunarbaboon'),
cls('M2Bulls', 'm2bulls'),
cls('Magnificatz', 'magnificatz'),
cls('Maintaining', 'maintaining'),
cls('MakingIt', 'making-it'),
cls('MariasDay', 'marias-day'),
cls('Marmaduke', 'marmaduke'),
cls('MarshallRamsey', 'marshallramsey'),
cls('MattBors', 'matt-bors'),
cls('MattDavies', 'mattdavies'),
cls('MattWuerker', 'mattwuerker'),
cls('MediumLarge', 'medium-large'),
cls('MessycowComics', 'messy-cow'),
cls('MexikidStories', 'mexikid-stories'),
cls('MichaelRamirez', 'michaelramirez'),
cls('MikeDuJour', 'mike-du-jour'),
cls('MikeLester', 'mike-lester'),
cls('MikeLuckovich', 'mikeluckovich'),
cls('MissPeach', 'miss-peach'),
cls('Mo', 'mo'),
cls('ModeratelyConfused', 'moderately-confused'),
cls('Momma', 'momma'),
cls('MomsCancer', 'moms-cancer'),
cls('Monty', 'monty'),
cls('MontyDiaros', 'monty-diaros', 'es'),
cls('MotleyClassics', 'motley-classics'),
cls('MrLowe', 'mr-lowe'),
cls('MustardAndBoloney', 'mustard-and-boloney'),
cls('MuttAndJeff', 'muttandjeff'),
cls('MyDadIsDracula', 'my-dad-is-dracula'),
cls('MythTickle', 'mythtickle'),
cls('Nancy', 'nancy'),
cls('NancyClassics', 'nancy-classics'),
cls('NateElGrande', 'nate-el-grande', 'es'),
cls('NestHeads', 'nestheads'),
cls('NEUROTICA', 'neurotica'),
cls('NewAdventuresOfQueenVictoria', 'thenewadventuresofqueenvictoria'),
cls('NextDoorNeighbors', 'next-door-neighbors'),
cls('NickAnderson', 'nickanderson'),
cls('NickAndZuzu', 'nick-and-zuzu'),
cls('NonSequitur', 'nonsequitur'),
cls('NothingIsNotSomething', 'nothing-is-not-something'),
cls('NotInventedHere', 'not-invented-here'),
cls('NowRecharging', 'now-recharging'),
cls('OffTheMark', 'offthemark'),
cls('OhBrother', 'oh-brother'),
cls('OllieAndQuentin', 'ollie-and-quentin'),
cls('OnAClaireDay', 'onaclaireday'),
cls('OneBigHappy', 'onebighappy'),
cls('OrdinaryBill', 'ordinary-bill'),
cls('OriginsOfTheSundayComics', 'origins-of-the-sunday-comics'),
cls('OurSuperAdventure', 'our-super-adventure'),
cls('Outland', 'outland'),
cls('OutOfTheGenePoolReRuns', 'outofthegenepool'),
cls('Overboard', 'overboard'),
cls('OverboardEnEspanol', 'overboardespanol', 'es'),
cls('OverTheHedge', 'overthehedge'),
cls('OzyAndMillie', 'ozy-and-millie'),
cls('PatOliphant', 'patoliphant'),
cls('PCAndPixel', 'pcandpixel'),
cls('Peanuts', 'peanuts'),
cls('PeanutsBegins', 'peanuts-begins'),
cls('PearlsBeforeSwine', 'pearlsbeforeswine'),
cls('Periquita', 'periquita', 'es'),
cls('PerlasParaLosCerdos', 'perlas-para-los-cerdos', 'es'),
cls('PerryBibleFellowship', 'perry-bible-fellowship'),
cls('PhilHands', 'phil-hands'),
cls('PhoebeAndHerUnicorn', 'phoebe-and-her-unicorn'),
cls('Pibgorn', 'pibgorn'),
cls('PibgornSketches', 'pibgornsketches'),
cls('Pickles', 'pickles'),
cls('PirateMike', 'pirate-mike'),
cls('PleaseListenToMe', 'please-listen-to-me'),
cls('Pluggers', 'pluggers'),
cls('PoochCafe', 'poochcafe'),
cls('Poorcraft', 'poorcraft'),
cls('PoorlyDrawnLines', 'poorly-drawn-lines'),
cls('PotShots', 'pot-shots'),
cls('PreTeena', 'preteena'),
cls('PricklyCity', 'pricklycity'),
cls('PromisesPromises', 'promises-promises'),
cls('QuestionableQuotebook', 'questionable-quotebook'),
cls('RabbitsAgainstMagic', 'rabbitsagainstmagic'),
cls('RaisingDuncan', 'raising-duncan'),
cls('RandolphItch2Am', 'randolphitch'),
cls('RealityCheck', 'realitycheck'),
cls('RealLifeAdventures', 'reallifeadventures'),
cls('RebeccaHendin', 'rebecca-hendin'),
cls('RedAndRover', 'redandrover'),
cls('RedMeat', 'redmeat'),
cls('RichardsPoorAlmanac', 'richards-poor-almanac'),
cls('RipHaywire', 'riphaywire'),
cls('RipleysAunqueUstedNoLoCrea', 'ripleys-en-espanol', 'es'),
cls('RipleysBelieveItOrNot', 'ripleysbelieveitornot'),
cls('RobbieAndBobby', 'robbie-and-bobby'),
cls('RobertAriail', 'robert-ariail'),
cls('RobRogers', 'robrogers'),
cls('Rosebuds', 'rosebuds'),
cls('RoseIsRose', 'roseisrose'),
cls('Rubes', 'rubes'),
cls('RudyPark', 'rudypark'),
cls('SarahsScribbles', 'sarahs-scribbles'),
cls('SaturdayMorningBreakfastCereal', 'saturday-morning-breakfast-cereal'),
cls('SavageChickens', 'savage-chickens'),
cls('ScaryGary', 'scarygary'),
cls('ScenesFromAMultiverse', 'scenes-from-a-multiverse'),
cls('ScottStantis', 'scottstantis'),
cls('ShenComix', 'shen-comix'),
cls('ShirleyAndSonClassics', 'shirley-and-son-classics'),
cls('Shoe', 'shoe'),
cls('SigneWilkinson', 'signewilkinson'),
cls('SketchsharkComics', 'sketchshark-comics'),
cls('SkinHorse', 'skinhorse'),
cls('Skippy', 'skippy'),
cls('SmallPotatoes', 'small-potatoes'),
cls('SnoopyEnEspanol', 'peanuts-espanol', 'es'),
cls('Snowflakes', 'snowflakes'),
cls('SnowSez', 'snow-sez'),
cls('SpeedBump', 'speedbump'),
cls('SpiritOfTheStaircase', 'spirit-of-the-staircase'),
cls('SpotTheFrog', 'spot-the-frog'),
cls('Starling', 'starling'),
cls('SteveBenson', 'stevebenson'),
cls('SteveBreen', 'stevebreen'),
cls('SteveKelley', 'stevekelley'),
cls('StickyComics', 'sticky-comics'),
cls('StoneSoup', 'stonesoup'),
cls('StoneSoupClassics', 'stone-soup-classics'),
cls('StrangeBrew', 'strangebrew'),
cls('StuartCarlson', 'stuartcarlson'),
cls('StudioJantze', 'studio-jantze'),
cls('SunnyStreet', 'sunny-street'),
cls('SunshineState', 'sunshine-state'),
cls('SuperFunPakComix', 'super-fun-pak-comix'),
cls('SwanEaters', 'swan-eaters'),
cls('SweetAndSourPork', 'sweet-and-sour-pork'),
cls('Sylvia', 'sylvia'),
cls('TankMcNamara', 'tankmcnamara'),
cls('Tarzan', 'tarzan'),
cls('TarzanEnEspanol', 'tarzan-en-espanol', 'es'),
cls('TedRall', 'ted-rall'),
cls('TenCats', 'ten-cats'),
cls('TextsFromMittens', 'texts-from-mittens'),
cls('Thatababy', 'thatababy'),
cls('ThatIsPriceless', 'that-is-priceless'),
cls('ThatNewCarlSmell', 'that-new-carl-smell'),
cls('TheAcademiaWaltz', 'academiawaltz'),
cls('TheAdventuresOfBusinessCat', 'the-adventures-of-business-cat'),
cls('TheArgyleSweater', 'theargylesweater'),
cls('TheAwkwardYeti', 'the-awkward-yeti'),
cls('TheBarn', 'thebarn'),
cls('TheBigPicture', 'thebigpicture'),
cls('TheBoondocks', 'boondocks'),
cls('TheBornLoser', 'the-born-loser'),
cls('TheBuckets', 'thebuckets'),
cls('TheCity', 'thecity'),
cls('TheComicStripThatHasAFinaleEveryDay', 'the-comic-strip-that-has-a-finale-every-day'),
cls('TheDailyDrawing', 'the-daily-drawing'),
cls('TheDinetteSet', 'dinetteset'),
cls('TheDoozies', 'thedoozies'),
cls('TheDuplex', 'duplex'),
cls('TheElderberries', 'theelderberries'),
cls('TheFlyingMcCoys', 'theflyingmccoys'),
cls('TheFuscoBrothers', 'thefuscobrothers'),
cls('TheGrizzwells', 'thegrizzwells'),
cls('TheHumbleStumble', 'humble-stumble'),
cls('TheKChronicles', 'thekchronicles'),
cls('TheKnightLife', 'theknightlife'),
cls('TheLastMechanicalMonster', 'the-last-mechanical-monster'),
cls('TheLeftyBoscoPictureShow', 'leftyboscopictureshow'),
cls('TheMartianConfederacy', 'the-martian-confederacy'),
cls('TheMeaningOfLila', 'meaningoflila'),
cls('TheMiddleAge', 'the-middle-age'),
cls('TheMiddletons', 'themiddletons'),
cls('TheNormClassics', 'thenorm'),
cls('TheOtherCoast', 'theothercoast'),
cls('TheOtherEnd', 'the-other-end'),
cls('TheUpsideDownWorldOfGustaveVerbeek', 'upside-down-world-of-gustave-verbeek'),
cls('TheWanderingMelon', 'the-wandering-melon'),
cls('TheWizardOfIdSpanish', 'wizardofidespanol', 'es'),
cls('TheWorriedWell', 'the-worried-well'),
cls('think', 'think'),
cls('ThinLines', 'thinlines'),
cls('TimCampbell', 'tim-campbell'),
cls('TinySepuku', 'tinysepuku'),
cls('TodaysSzep', 'todays-szep'),
cls('TomTheDancingBug', 'tomthedancingbug'),
cls('TomToles', 'tomtoles'),
cls('TooMuchCoffeeMan', 'toomuchcoffeeman'),
cls('ToughTown', 'tough-town'),
cls('Trivquiz', 'trivquiz'),
cls('Trucutu', 'trucutu', 'es'),
cls('TruthFacts', 'truth-facts'),
cls('Tutelandia', 'tutelandia', 'es'),
cls('TwoPartyOpera', 'two-party-opera'),
cls('UnderpantsAndOverbites', 'underpants-and-overbites'),
cls('UnderstandingChaos', 'understanding-chaos'),
cls('UnstrangePhenomena', 'unstrange-phenomena'),
cls('ViewsAfrica', 'viewsafrica'),
cls('ViewsAmerica', 'viewsamerica'),
cls('ViewsAsia', 'viewsasia'),
cls('ViewsBusiness', 'viewsbusiness'),
cls('ViewsEurope', 'viewseurope'),
cls('ViewsLatinAmerica', 'viewslatinamerica'),
cls('ViewsMidEast', 'viewsmideast'),
cls('ViewsOfTheWorld', 'viewsoftheworld'),
cls('ViiviAndWagner', 'viivi-and-wagner'),
cls('WallaceTheBrave', 'wallace-the-brave'),
cls('WaltHandelsman', 'walthandelsman'),
cls('Warped', 'warped'),
cls('WatchYourHead', 'watchyourhead'),
cls('Wawawiwa', 'wawawiwa'),
cls('WaynoVision', 'waynovision'),
cls('WeePals', 'weepals'),
cls('Widdershins', 'widdershins'),
cls('WideOpen', 'wide-open'),
cls('WinLoseDrew', 'drewlitton'),
cls('Winston', 'winston'),
cls('WizardOfId', 'wizardofid'),
cls('WizardOfIdClassics', 'wizard-of-id-classics'),
cls('Wondermark', 'wondermark'),
cls('WorkingDaze', 'working-daze'),
cls('WorkingItOut', 'workingitout'),
cls('WrongHands', 'wrong-hands'),
cls('WTDuck', 'wtduck'),
cls('WuMo', 'wumo'),
cls('WumoEnEspanol', 'wumoespanol', 'es'),
cls('Yaffle', 'yaffle'),
cls('YesImHotInThis', 'yesimhotinthis'),
cls('ZackHill', 'zackhill'),
cls('ZenPencils', 'zen-pencils'),
cls('Ziggy', 'ziggy'),
cls('ZiggyEnEspanol', 'ziggyespanol', 'es'),
# END AUTOUPDATE
)
|
webcomics/dosage
|
dosagelib/plugins/gocomics.py
|
Python
|
mit
| 25,793
|
[
"Brian"
] |
ca8d29e1bde1f940278a38d77dede59ab0761a845fabc6d3ec53923a15c68987
|
#!/usr/bin/env python
#
# $File: ancestralPop.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
# Demonstrates access to ancestral generations kept in memory by simuPOP.
import simuPOP as sim
# 500 individuals, one locus; keep the two most recent parental
# generations in memory (ancGen=2) so they can be revisited later.
pop = sim.Population(500, loci=1, ancGen=2)
pop.evolve(
    initOps=[
        sim.InitSex(),
        sim.InitGenotype(freq=[0.5, 0.5])
    ],
    matingScheme = sim.RandomMating(),
    postOps=[
        # begin=-3: only compute/print allele frequency for the last
        # three generations of the 20-generation evolution.
        sim.Stat(alleleFreq=0, begin=-3),
        sim.PyEval(r"'%.3f\n' % alleleFreq[0][0]", begin=-3)
    ],
    gen = 20
)
# information
pop.ancestralGens()
pop.popSize(ancGen=1)
pop.setVirtualSplitter(sim.SexSplitter())
# number of males in the current and parental generation
pop.subPopSize((0,0)), pop.subPopSize((0,0), ancGen=1)
# start from current generation
for i in range(pop.ancestralGens(), -1, -1):
    # Make ancestral generation i the "active" one, then stat/print it.
    pop.useAncestralGen(i)
    sim.stat(pop, alleleFreq=0)
    print('%d %.3f' % (i, pop.dvars().alleleFreq[0][0]))
# restore to the current generation
pop.useAncestralGen(0)
|
BoPeng/simuPOP
|
docs/ancestralPop.py
|
Python
|
gpl-2.0
| 1,892
|
[
"VisIt"
] |
2786bfd32c70f1fe03b642e6876aa2dfe710338aaa0591aecc685b485f579905
|
"""
Basic usage example on a ply mesh. Note that this require a closed, manifold
input mesh.
"""
##
import os
import numpy as np
import mayavi.mlab as mlab
import itertools
import utils
import ply
import meshcut
##
if __name__ == '__main__':
    ##
    # Load the sample ply mesh shipped alongside the meshcut package.
    example_dir = os.path.join(os.path.dirname(meshcut.__file__), 'examples')
    example_fname = os.path.join(example_dir, 'data', 'mesh.ply')
    with open(example_fname) as f:
        verts, faces, _ = ply.load_ply(f)
    mesh = meshcut.TriangleMesh(verts, faces)
    ##
    def show(plane, expected_n_contours):
        """Cut ``mesh`` with ``plane``, visualize the mesh, the cutting
        plane and the resulting cross-section contours, and return the
        list of contours (each a list of 3D points)."""
        P = meshcut.cross_section_mesh(mesh, plane)
        colors = [
            (0, 1, 1),
            (1, 0, 1),
            (0, 0, 1)
        ]
        print("num contours : ", len(P), ' expected : ', expected_n_contours)
        if True:
            utils.trimesh3d(mesh.verts, mesh.tris, color=(1, 1, 1),
                            opacity=0.5)
            utils.show_plane(plane.orig, plane.n, scale=1, color=(1, 0, 0),
                             opacity=0.5)
            # Draw each contour in a distinct (cycled) color.
            for p, color in zip(P, itertools.cycle(colors)):
                p = np.array(p)
                mlab.plot3d(p[:, 0], p[:, 1], p[:, 2], tube_radius=None,
                            line_width=3.0, color=color)
        return P
    ##
    # This will align the plane with some edges, so this is a good test
    # for vertices intersection handling
    plane_orig = (1.28380000591278076172, -0.12510000169277191162, 0)
    plane_norm = (1, 0, 0)
    plane = meshcut.Plane(plane_orig, plane_norm)
    show(plane, expected_n_contours=3)
    mlab.show()
    ##
    # This will align the plane with some edges, so this is a good test
    # for vertices intersection handling
    plane_orig = (0.93760002, -0.12909999, 0)
    plane_norm = (1, 0, 0)
    plane = meshcut.Plane(plane_orig, plane_norm)
    show(plane, expected_n_contours=1)
    mlab.show()
    ##
    plane_orig = (1, 0, 0)
    plane_norm = (1, 0, 0)
    plane = meshcut.Plane(plane_orig, plane_norm)
    show(plane, expected_n_contours=3)
    mlab.show()
    ##
    # A tilted (non-axis-aligned) plane for a more general test case.
    plane_orig = (0.7, 0, 0)
    plane_norm = (0.2, 0.5, 0.3)
    plane = meshcut.Plane(plane_orig, plane_norm)
    show(plane, expected_n_contours=2)
    mlab.show()
    ##
|
julienr/meshcut
|
examples/0_cross_section.py
|
Python
|
mit
| 2,254
|
[
"Mayavi"
] |
07390a79283d4e437d366e827d18f333dde22f005d2c63a33cf4c8f40473f7b3
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2011 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._disconnected import Disconnected
from ._everyone import Everyone
from ._familywithincompleteevent import FamilyWithIncompleteEvent
from ._hasaddress import HasAddress
from ._hasalternatename import HasAlternateName
from ._hasassociation import HasAssociation
from ._hasattribute import HasAttribute
from ._hasbirth import HasBirth
from ._hascitation import HasCitation
from ._hascommonancestorwith import HasCommonAncestorWith
from ._hascommonancestorwithfiltermatch import HasCommonAncestorWithFilterMatch
from ._hasdeath import HasDeath
from ._hasevent import HasEvent
from ._hasfamilyattribute import HasFamilyAttribute
from ._hasfamilyevent import HasFamilyEvent
from ._hasgallery import HavePhotos
from ._hasidof import HasIdOf
from ._haslds import HasLDS
from ._hasnameof import HasNameOf
from ._hasnameorigintype import HasNameOriginType
from ._hasnametype import HasNameType
from ._hasnickname import HasNickname
from ._hasnote import HasNote
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasnoteregexp import HasNoteRegexp
from ._hasrelationship import HasRelationship
from ._hassourcecount import HasSourceCount
from ._hassourceof import HasSourceOf
from ._hastag import HasTag
from ._hastextmatchingregexpof import HasTextMatchingRegexpOf
from ._hastextmatchingsubstringof import HasTextMatchingSubstringOf
from ._hasunknowngender import HasUnknownGender
from ._havealtfamilies import HaveAltFamilies
from ._havechildren import HaveChildren
from ._incompletenames import IncompleteNames
from ._isancestorof import IsAncestorOf
from ._isancestoroffiltermatch import IsAncestorOfFilterMatch
from ._isbookmarked import IsBookmarked
from ._ischildoffiltermatch import IsChildOfFilterMatch
from ._isdefaultperson import IsDefaultPerson
from ._isdescendantfamilyof import IsDescendantFamilyOf
from ._isdescendantfamilyoffiltermatch import IsDescendantFamilyOfFilterMatch
from ._isdescendantof import IsDescendantOf
from ._isdescendantoffiltermatch import IsDescendantOfFilterMatch
from ._isduplicatedancestorof import IsDuplicatedAncestorOf
from ._isfemale import IsFemale
from ._islessthannthgenerationancestorof import \
IsLessThanNthGenerationAncestorOf
from ._islessthannthgenerationancestorofbookmarked import \
IsLessThanNthGenerationAncestorOfBookmarked
from ._islessthannthgenerationancestorofdefaultperson import \
IsLessThanNthGenerationAncestorOfDefaultPerson
from ._islessthannthgenerationdescendantof import \
IsLessThanNthGenerationDescendantOf
from ._ismale import IsMale
from ._ismorethannthgenerationancestorof import \
IsMoreThanNthGenerationAncestorOf
from ._ismorethannthgenerationdescendantof import \
IsMoreThanNthGenerationDescendantOf
from ._isparentoffiltermatch import IsParentOfFilterMatch
from ._issiblingoffiltermatch import IsSiblingOfFilterMatch
from ._isspouseoffiltermatch import IsSpouseOfFilterMatch
from ._iswitness import IsWitness
from ._matchesfilter import MatchesFilter
from ._matcheseventfilter import MatchesEventFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._missingparent import MissingParent
from ._multiplemarriages import MultipleMarriages
from ._nevermarried import NeverMarried
from ._nobirthdate import NoBirthdate
from ._nodeathdate import NoDeathdate
from ._peopleprivate import PeoplePrivate
from ._peoplepublic import PeoplePublic
from ._personwithincompleteevent import PersonWithIncompleteEvent
from ._probablyalive import ProbablyAlive
from ._relationshippathbetween import RelationshipPathBetween
from ._deeprelationshippathbetween import DeepRelationshipPathBetween
from ._relationshippathbetweenbookmarks import RelationshipPathBetweenBookmarks
from ._searchname import SearchName
from ._regexpname import RegExpName
from ._matchidof import MatchIdOf
from ._regexpidof import RegExpIdOf
from ._changedsince import ChangedSince
from ._isrelatedwith import IsRelatedWith
#-------------------------------------------------------------------------
#
# This is used by Custom Filter Editor tool
#
#-------------------------------------------------------------------------
# The filter rules offered by the Custom Filter Editor, in the order they
# appear in its rule-selection list.
editor_rule_list = [
    Everyone,
    IsFemale,
    HasUnknownGender,
    IsMale,
    IsDefaultPerson,
    IsBookmarked,
    HasAlternateName,
    HasAddress,
    HasAssociation,
    HasIdOf,
    HasLDS,
    HasNameOf,
    HasNameOriginType,
    HasNameType,
    HasNickname,
    HasRelationship,
    HasDeath,
    HasBirth,
    HasCitation,
    HasEvent,
    HasFamilyEvent,
    HasAttribute,
    HasFamilyAttribute,
    HasTag,
    HasSourceCount,
    HasSourceOf,
    HaveAltFamilies,
    HavePhotos,
    HaveChildren,
    IncompleteNames,
    NeverMarried,
    MultipleMarriages,
    NoBirthdate,
    NoDeathdate,
    PersonWithIncompleteEvent,
    FamilyWithIncompleteEvent,
    ProbablyAlive,
    PeoplePrivate,
    PeoplePublic,
    IsWitness,
    IsDescendantOf,
    IsDescendantFamilyOf,
    IsDescendantFamilyOfFilterMatch,
    IsLessThanNthGenerationAncestorOfDefaultPerson,
    IsDescendantOfFilterMatch,
    IsDuplicatedAncestorOf,
    IsLessThanNthGenerationDescendantOf,
    IsMoreThanNthGenerationDescendantOf,
    IsAncestorOf,
    IsAncestorOfFilterMatch,
    IsLessThanNthGenerationAncestorOf,
    IsLessThanNthGenerationAncestorOfBookmarked,
    IsMoreThanNthGenerationAncestorOf,
    HasCommonAncestorWith,
    HasCommonAncestorWithFilterMatch,
    MatchesFilter,
    MatchesEventFilter,
    MatchesSourceConfidence,
    MissingParent,
    IsChildOfFilterMatch,
    IsParentOfFilterMatch,
    IsSpouseOfFilterMatch,
    IsSiblingOfFilterMatch,
    RelationshipPathBetween,
    DeepRelationshipPathBetween,
    RelationshipPathBetweenBookmarks,
    HasTextMatchingSubstringOf,
    HasNote,
    HasNoteRegexp,
    RegExpIdOf,
    Disconnected,
    ChangedSince,
    IsRelatedWith,
]
|
sam-m888/gprime
|
gprime/filters/rules/person/__init__.py
|
Python
|
gpl-2.0
| 6,834
|
[
"Brian"
] |
dd74db8cbe5d1ff5bff3a9df8dd0ef5f30789327520dcd958aff436667b2b19c
|
""" This is the guy that actually modifies the content of the CS
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import zlib
import difflib
from diraccfg import CFG
from DIRAC.Core.Utilities import List, Time
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = "$Id$"
class Modificator(object):
    """Client-side editor for DIRAC Configuration Service (CS) contents.

    Holds a local ``CFG`` tree, lets callers mutate it (options, sections,
    comments, renames), stamps every modified entry with a commiter tag,
    and synchronises with the remote CS through an RPC client
    (load, diff, merge, commit, rollback).
    """

    def __init__(self, rpcClient=False, commiterId="unknown"):
        # Marker prefix used to store "who committed" lines inside entry
        # comments; see __setCommiter.
        self.commiterTag = "@@-"
        self.commiterId = commiterId
        self.cfgData = CFG()
        self.rpcClient = None
        if rpcClient:
            self.setRPCClient(rpcClient)

    def loadCredentials(self):
        """Derive the commiter id ("user@group - timestamp") from the proxy."""
        retVal = getProxyInfo()
        if retVal["OK"]:
            credDict = retVal["Value"]
            self.commiterId = "%s@%s - %s" % (
                credDict["username"],
                credDict["group"],
                Time.dateTime().strftime("%Y-%m-%d %H:%M:%S"),
            )
            return retVal
        return retVal

    def setRPCClient(self, rpcClient):
        self.rpcClient = rpcClient

    def loadFromRemote(self):
        """Replace the local CFG with the zlib-compressed remote contents."""
        retVal = self.rpcClient.getCompressedData()
        if retVal["OK"]:
            self.cfgData = CFG()
            data = retVal["Value"]
            if isinstance(data, str):
                # RPC layer may deliver str; recover the raw bytes for zlib.
                data = data.encode(errors="surrogateescape")
            self.cfgData.loadFromBuffer(zlib.decompress(data).decode())
        return retVal

    def getCFG(self):
        return self.cfgData

    def getSections(self, sectionPath):
        return gConfigurationData.getSectionsFromCFG(sectionPath, self.cfgData)

    def getComment(self, sectionPath):
        return gConfigurationData.getCommentFromCFG(sectionPath, self.cfgData)

    def getOptions(self, sectionPath):
        return gConfigurationData.getOptionsFromCFG(sectionPath, self.cfgData)

    def getOptionsDict(self, sectionPath):
        """Gives the options of a CS section in a Python dict with values as
        lists"""
        opts = self.getOptions(sectionPath)
        pathDict = dict((o, self.getValue("%s/%s" % (sectionPath, o))) for o in opts)
        return pathDict

    def getDictRootedAt(self, relpath="", root=""):
        """Gives the configuration rooted at path in a Python dict. The
        result is a Python dictionary that reflects the structure of the
        config file."""
        # Recursive helper: options become leaves, sub-sections nest.
        def getDictRootedAt(path):
            retval = {}
            opts = self.getOptionsDict(path)
            secs = self.getSections(path)
            for k in opts:
                retval[k] = opts[k]
            for i in secs:
                retval[i] = getDictRootedAt(path + "/" + i)
            return retval
        return getDictRootedAt(root + "/" + relpath)

    def getValue(self, optionPath):
        return gConfigurationData.extractOptionFromCFG(optionPath, self.cfgData)

    def sortAlphabetically(self, path, ascending=True):
        """Sort the entries of the section at ``path`` and stamp it modified."""
        cfg = self.__getParentCFG(path, parentLevel=0)
        if cfg:
            if cfg.sortAlphabetically(ascending):
                self.__setCommiter(path)

    def __getParentCFG(self, path, parentLevel=1):
        """Return the CFG node ``parentLevel`` levels above ``path``,
        or False if any intermediate section is missing."""
        sectionList = List.fromChar(path, "/")
        cfg = self.cfgData
        try:
            if parentLevel > 0:
                sectionList = sectionList[:-parentLevel]
            for section in sectionList:
                cfg = cfg[section]
            return cfg
        except Exception:
            return False

    def __setCommiter(self, entryPath, cfg=False):
        """Rewrite the entry's comment so its last line records the commiter."""
        if not cfg:
            cfg = self.__getParentCFG(entryPath)
        entry = List.fromChar(entryPath, "/")[-1]
        comment = cfg.getComment(entry)
        # Drop any previous commiter line(s), then append the current one.
        filteredComment = [line.strip() for line in comment.split("\n") if line.find(self.commiterTag) != 0]
        filteredComment.append("%s%s" % (self.commiterTag, self.commiterId))
        cfg.setComment(entry, "\n".join(filteredComment))

    def setOptionValue(self, optionPath, value):
        """Set an option, creating any missing parent sections on the way."""
        levelList = [level.strip() for level in optionPath.split("/") if level.strip() != ""]
        parentPath = "/%s" % "/".join(levelList[:-1])
        optionName = List.fromChar(optionPath, "/")[-1]
        self.createSection(parentPath)
        cfg = self.__getParentCFG(optionPath)
        if not cfg:
            return
        cfg.setOption(optionName, value)
        self.__setCommiter(optionPath, cfg)

    def createSection(self, sectionPath):
        """Create all missing sections along ``sectionPath``.

        Returns True if at least one section was created.
        """
        levelList = [level.strip() for level in sectionPath.split("/") if level.strip() != ""]
        currentPath = ""
        cfg = self.cfgData
        createdSection = False
        for section in levelList:
            currentPath += "/%s" % section
            if section not in cfg.listSections():
                cfg.createNewSection(section)
                self.__setCommiter(currentPath)
                createdSection = True
            cfg = cfg[section]
        return createdSection

    def setComment(self, entryPath, value):
        cfg = self.__getParentCFG(entryPath)
        entry = List.fromChar(entryPath, "/")[-1]
        if cfg.setComment(entry, value):
            self.__setCommiter(entryPath)
            return True
        return False

    def existsSection(self, sectionPath):
        sectionList = List.fromChar(sectionPath, "/")
        cfg = self.cfgData
        try:
            for section in sectionList[:-1]:
                cfg = cfg[section]
            return len(sectionList) == 0 or sectionList[-1] in cfg.listSections()
        except Exception:
            return False

    def existsOption(self, optionPath):
        sectionList = List.fromChar(optionPath, "/")
        cfg = self.cfgData
        try:
            for section in sectionList[:-1]:
                cfg = cfg[section]
            return sectionList[-1] in cfg.listOptions()
        except Exception:
            return False

    def renameKey(self, path, newName):
        """Rename the last path component (option or section). Returns bool."""
        parentCfg = self.cfgData.getRecursive(path, -1)
        if not parentCfg:
            return False
        pathList = List.fromChar(path, "/")
        oldName = pathList[-1]
        if parentCfg["value"].renameKey(oldName, newName):
            pathList[-1] = newName
            self.__setCommiter("/%s" % "/".join(pathList))
            return True
        else:
            return False

    def copyKey(self, originalKeyPath, newKey):
        """Duplicate a key under the same parent. Returns bool."""
        parentCfg = self.cfgData.getRecursive(originalKeyPath, -1)
        if not parentCfg:
            return False
        pathList = List.fromChar(originalKeyPath, "/")
        originalKey = pathList[-1]
        if parentCfg["value"].copyKey(originalKey, newKey):
            self.__setCommiter("/%s/%s" % ("/".join(pathList[:-1]), newKey))
            return True
        return False

    def removeOption(self, optionPath):
        if not self.existsOption(optionPath):
            return False
        cfg = self.__getParentCFG(optionPath)
        optionName = List.fromChar(optionPath, "/")[-1]
        return cfg.deleteKey(optionName)

    def removeSection(self, sectionPath):
        if not self.existsSection(sectionPath):
            return False
        cfg = self.__getParentCFG(sectionPath)
        sectionName = List.fromChar(sectionPath, "/")[-1]
        return cfg.deleteKey(sectionName)

    def loadFromBuffer(self, data):
        self.cfgData = CFG()
        self.cfgData.loadFromBuffer(data)

    def loadFromFile(self, filename):
        self.cfgData = CFG()
        self.mergeFromFile(filename)

    def dumpToFile(self, filename):
        with open(filename, "wt") as fd:
            fd.write(str(self.cfgData))

    def mergeFromFile(self, filename):
        cfg = CFG()
        cfg.loadFromFile(filename)
        self.cfgData = self.cfgData.mergeWith(cfg)

    def mergeFromCFG(self, cfg):
        self.cfgData = self.cfgData.mergeWith(cfg)

    def mergeSectionFromCFG(self, sectionPath, cfg):
        """Merge ``cfg`` into the section at ``sectionPath`` in place."""
        parentDict = self.cfgData.getRecursive(sectionPath, -1)
        parentCFG = parentDict["value"]
        secName = [lev.strip() for lev in sectionPath.split("/") if lev.strip()][-1]
        secCFG = parentCFG[secName]
        if not secCFG:
            return False
        mergedCFG = secCFG.mergeWith(cfg)
        # Re-create the section with the merged contents, keeping the comment.
        parentCFG.deleteKey(secName)
        parentCFG.createNewSection(secName, parentDict["comment"], mergedCFG)
        self.__setCommiter(sectionPath)
        return True

    def __str__(self):
        return str(self.cfgData)

    def commit(self):
        """Push the local CFG (zlib level 9 compressed) to the server."""
        compressedData = zlib.compress(str(self.cfgData).encode(), 9)
        return self.rpcClient.commitNewData(compressedData)

    def getHistory(self, limit=0):
        retVal = self.rpcClient.getCommitHistory(limit)
        if retVal["OK"]:
            return retVal["Value"]
        return []

    def showCurrentDiff(self):
        """Diff local vs remote CFG; returns an ndiff iterator (or [])."""
        retVal = self.rpcClient.getCompressedData()
        if retVal["OK"]:
            data = retVal["Value"]
            if isinstance(data, str):
                data = data.encode(errors="surrogateescape")
            remoteData = zlib.decompress(data).decode().splitlines()
            localData = str(self.cfgData).splitlines()
            return difflib.ndiff(remoteData, localData)
        return []

    def getVersionDiff(self, fromDate, toDate):
        """Diff two historic CS versions; returns an ndiff iterator (or [])."""
        retVal = self.rpcClient.getVersionContents([fromDate, toDate])
        if retVal["OK"]:
            fromData = retVal["Value"][0]
            if isinstance(fromData, str):
                fromData = fromData.encode(errors="surrogateescape")
            fromData = zlib.decompress(fromData).decode()
            toData = retVal["Value"][1]
            if isinstance(toData, str):
                toData = toData.encode(errors="surrogateescape")
            toData = zlib.decompress(toData).decode()
            return difflib.ndiff(fromData.split("\n"), toData.split("\n"))
        return []

    def mergeWithServer(self):
        """Merge local changes on top of the current server contents,
        preserving the server's version stamp."""
        retVal = self.rpcClient.getCompressedData()
        if retVal["OK"]:
            remoteCFG = CFG()
            data = retVal["Value"]
            if isinstance(data, str):
                data = data.encode(errors="surrogateescape")
            remoteCFG.loadFromBuffer(zlib.decompress(data).decode())
            serverVersion = gConfigurationData.getVersion(remoteCFG)
            self.cfgData = remoteCFG.mergeWith(self.cfgData)
            gConfigurationData.setVersion(serverVersion, self.cfgData)
        return retVal

    def rollbackToVersion(self, version):
        return self.rpcClient.rollbackToVersion(version)

    def updateGConfigurationData(self):
        gConfigurationData.setRemoteCFG(self.cfgData)
|
ic-hep/DIRAC
|
src/DIRAC/ConfigurationSystem/private/Modificator.py
|
Python
|
gpl-3.0
| 10,681
|
[
"DIRAC"
] |
e4377fd80c896d21cb7f203e0e41b085bd0683c6c6d2d74686db7dbd762e2142
|
import base64
import hmac
import json
import requests
import time
import datetime
import urllib
import os
import uuid
import re
from hashlib import sha1
from hashlib import md5
def parse_time(timestr):
    """Validate a ``YYYY-MM-DD HH:MM:SS`` timestamp and return it
    normalized to the same format.

    The previous implementation round-tripped through ``time.mktime`` and
    ``datetime.fromtimestamp``, which depends on the local timezone and is
    lossy around DST transitions; parsing directly with ``strptime`` gives
    the same result for all valid inputs without those pitfalls.

    Raises ValueError if ``timestr`` does not match the format.
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    return datetime.datetime.strptime(timestr, fmt).strftime(fmt)
class Location:
    """A latitude/longitude pair plus the search radius ("delta") that the
    API expects alongside coordinates."""

    def __init__(self, latitude, longitude, delta=None):
        self.latitude = latitude
        self.longitude = longitude
        # Fall back to the API's default radius when none is supplied.
        self.delta = "0.030000" if delta is None else delta

    def __str__(self):
        return "Location(%s, %s)" % (self.latitude, self.longitude)
class PeekLocation:
    """A named remote "peek" area parsed from an API response dict."""

    def __init__(self, raw):
        self.id = raw['peekID']
        self.can_submit = bool(raw['canSubmit'])
        self.name = raw['location']
        # Bundle the coordinates and radius into a Location object.
        self.location = Location(
            raw['latitude'], raw['longitude'], raw['delta'])
class Comment:
    """A single comment on a yak, with vote / report / delete / reply
    actions that delegate to the owning client."""

    def __init__(self, raw, message_id, client):
        self.client = client
        self.message_id = message_id
        self.comment_id = raw["commentID"]
        self.comment = raw["comment"]
        self.time = parse_time(raw["time"])
        self.likes = int(raw["numberOfLikes"])
        self.poster_id = raw["posterID"]
        self.liked = int(raw["liked"])
        try:
            # The API sometimes returns backslash-escaped ids; strip them.
            self.message_id = self.message_id.replace('\\', '')
        except:
            pass

    def upvote(self):
        # Optimistically update local vote state before hitting the API.
        if self.liked == 0:
            self.likes += 1
            self.liked += 1
        return self.client.upvote_comment(self.comment_id)

    def downvote(self):
        if self.liked == 0:
            self.likes -= 1
            self.liked -= 1
        return self.client.downvote_comment(self.comment_id)

    def report(self):
        return self.client.report_comment(self.comment_id, self.message_id)

    def delete(self):
        # Only the author can delete; otherwise returns None.
        if self.poster_id == self.client.id:
            return self.client.delete_comment(self.comment_id, self.message_id)

    def reply(self, comment):
        return self.client.post_comment(self.message_id, comment)

    def print_comment(self):
        """Pretty-print the comment, prefixing ^/v for the local vote."""
        try:
            my_action = ""
            if self.liked > 0:
                my_action = "^ "
            elif self.liked < 0:
                my_action = "v "
            print ("\t\t%s(%s) %s \n\n\t\tPosted %s" % (my_action, self.likes, self.comment, self.time))
        # Fix for emoji crash: filter emoji if not supported
        except UnicodeEncodeError:
            self.comment = re.sub('[^\x00-\x7F]', '',self.comment)
            my_action = ""
            if self.liked > 0:
                my_action = "^ "
            elif self.liked < 0:
                my_action = "v "
            print ("\t\t%s(%s) %s \n\n\t\tPosted %s" % (my_action, self.likes, self.comment, self.time))
class Yak:
    """A single yak (post), with vote / report / delete / comment actions
    that delegate to the owning client."""

    def __init__(self, raw, client):
        self.client = client
        self.poster_id = raw["posterID"]
        self.hide_pin = bool(int(raw["hidePin"]))
        self.message_id = raw["messageID"]
        try:
            self.delivery_id = raw["deliveryID"]
        except KeyError:
            pass
        self.longitude = raw["longitude"]
        self.comments = int(raw["comments"])
        self.time = parse_time(raw["time"])
        self.latitude = raw["latitude"]
        self.likes = int(raw["numberOfLikes"])
        self.message = raw["message"]
        self.liked = False
        self.reyaked = False
        try:
            # These fields are absent from some API responses.
            self.type = raw["type"]
            self.liked = int(raw["liked"])
            self.reyaked = raw["reyaked"]
        except KeyError:
            pass
        #Yaks don't always have a handle
        try:
            self.handle = raw["handle"]
        except KeyError:
            self.handle = None
        #For some reason this seems necessary
        try:
            self.message_id = self.message_id.replace('\\', '')
        except:
            pass

    def upvote(self):
        # Optimistically update local vote state before hitting the API.
        if self.liked == 0:
            self.liked += 1
            self.likes += 1
        return self.client.upvote_yak(self.message_id)

    def downvote(self):
        if self.liked == 0:
            self.liked -= 1
            self.likes -= 1
        return self.client.downvote_yak(self.message_id)

    def report(self):
        return self.client.report_yak(self.message_id)

    def delete(self):
        # Only the author can delete; otherwise returns None.
        if self.poster_id == self.client.id:
            return self.client.delete_yak(self.message_id)

    def add_comment(self, comment):
        return self.client.post_comment(self.message_id, comment)

    def get_comments(self):
        return self.client.get_comments(self.message_id)

    def print_yak(self):
        """Pretty-print the yak, prefixing ^/v for the local vote."""
        try:
            if self.handle is not None:
                print ("### %s ###" % self.handle)
                print ()
            print (self.message)
            # Show arrow if yak is upvoted or downvoted
            my_action = ""
            if self.liked > 0:
                my_action = "^ "
            elif self.liked < 0:
                my_action = "v "
            print ("\n\t%s%s likes | Posted %s at %s %s" % (my_action, self.likes, self.time, self.latitude, self.longitude))
        # Fix for emoji crash: filter emoji if not supported
        except UnicodeEncodeError:
            self.message = re.sub('[^\x00-\x7F]', '',self.message)
            if self.handle is not None:
                print ("### %s ###" % self.handle.encode('utf-8').strip())
                print ()
            print (self.message)
            # Show arrow if yak is upvoted or downvoted
            my_action = ""
            if self.liked > 0:
                my_action = "^ "
            elif self.liked < 0:
                my_action = "v "
            print ("\n\t%s%s likes | Posted %s at %s %s" % (my_action, self.likes, self.time, self.latitude, self.longitude))
class Yakker:
    """HTTP client for the Yik Yak API.

    Every request is HMAC-SHA1 signed (see ``sign_request`` /
    ``post_sign_request``) with a hard-coded key and a time-based salt.
    Most endpoints take the user id plus the client's current location.
    """

    base_url = "https://us-east-api.yikyakapi.net/api/"
    #user_agent = "Dalvik/1.6.0 (Linux; U; Android 4.3; Samsung Galaxy S4 - 4.3 - API 18 - 1080x1920 Build/JLS36G)"
    user_agent = "Yik Yak/2.3.4 (iPhone; iOS 8.3; Scale/3.00)"
    version = '2.3.4'

    def __init__(self, user_id=None, location=None, force_register=False):
        if location is None:
            location = Location('0', '0')
        self.update_location(location)
        # With no user_id, generate one and register it with the service.
        if user_id is None:
            user_id = self.gen_id()
            self.register_id_new(user_id)
        elif force_register:
            self.register_id_new(user_id)
        self.id = user_id
        self.handle = None
        #self.update_stats()

    def gen_id(self):
        """Return a fresh uppercase UUID4 to use as the user id."""
        # Thanks for the fix: ryhanson
        return str(uuid.uuid4()).upper()

    def register_id_new(self, id):
        params = {
            "userID": id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
            "version": self.version,
        }
        result = self.get("registerUser", params)
        return result

    def sign_request(self, page, params):
        """Return (hash, salt) signing a GET request: the signed message is
        the request path plus the sorted query string plus the salt."""
        key = "EF64523D2BD1FA21F18F5BC654DFC41B"
        #key = 'F7CAFA2F-FE67-4E03-A090-AC7FFF010729'
        #The salt is just the current time in seconds since epoch
        salt = str(int(time.time()))
        #The message to be signed is essentially the request, with parameters sorted
        msg = "/api/" + page
        sorted_params = list(params.keys())
        sorted_params.sort()
        if len(params) > 0:
            msg += "?"
        for param in sorted_params:
            msg += "%s=%s&" % (param, params[param])
        #Chop off last "&"
        if len(params) > 0:
            msg = msg[:-1]
        #the salt is just appended directly
        msg += salt
        #Calculate the signature
        h = hmac.new(key.encode(), msg.encode(), sha1)
        hash = base64.b64encode(h.digest())
        return hash, salt

    def post_sign_request(self, page, params):
        """Return (hash, salt) signing a POST request: only the path and
        the salt are signed (body parameters are not included)."""
        #key = "F7CAFA2F-FE67-4E03-A090-AC7FFF010729"
        key = 'EF64523D2BD1FA21F18F5BC654DFC41B'
        #The salt is just the current time in seconds since epoch
        salt = str(int(time.time()))
        #The message to be signed is essentially the request, with parameters sorted
        msg = "/api/" + page
        #the salt is just appended directly
        msg += salt
        #Calculate the signature
        h = hmac.new(key.encode(), msg.encode(), sha1)
        hash = base64.b64encode(h.digest())
        return hash, salt

    def get(self, page, params):
        """Signed GET to ``base_url + page``; returns the requests Response."""
        url = self.base_url + page
        hash, salt = self.sign_request(page, params)
        params['hash'] = hash
        params['salt'] = salt
        headers = {
            "User-Agent": self.user_agent,
            "Accept-Encoding": "gzip",
            #"Cookie": "lat=" + self.location.latitude + "; long=" + self.location.longitude + "; pending=deleted; expires=Thu,01-Jan-1970 00:00:01 GMT;Max-Age=0",
        }
        return requests.get(url, params=params, headers=headers)

    def post(self, page, params):
        """Signed POST: hash/salt travel as query params, data in the body."""
        url = self.base_url + page
        hash, salt = self.post_sign_request(page, params)
        getparams = {'hash': hash, 'salt': salt}
        headers = {
            "User-Agent": self.user_agent,
            "Accept-Encoding": "gzip",
            #"Cookie": "lat=" + self.location.latitude + "; long=" + self.location.longitude + "; pending=deleted; expires=Thu,01-Jan-1970 00:00:01 GMT;Max-Age=0",
        }
        return requests.post(url, data=params, params=getparams, headers=headers)

    def get_yak_list(self, page, params):
        """GET ``page`` and parse the JSON response into Yak objects."""
        return self.parse_yaks(self.get(page, params).text)

    def parse_yaks(self, text):
        # Tolerate malformed/empty responses by returning no yaks.
        try:
            raw_yaks = json.loads(text)["messages"]
        except:
            raw_yaks = []
        yaks = []
        for raw_yak in raw_yaks:
            yaks.append(Yak(raw_yak, self))
        return yaks

    def parse_comments(self, text, message_id):
        # Tolerate malformed/empty responses by returning no comments.
        try:
            raw_comments = json.loads(text)["comments"]
        except:
            raw_comments = []
        comments = []
        for raw_comment in raw_comments:
            comments.append(Comment(raw_comment, message_id, self))
        return comments

    def contact(self, message):
        params = {
            "userID": self.id,
            "message": message,
        }
        return self.get("contactUs", params)

    def upvote_yak(self, message_id):
        params = {
            "userID": self.id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("likeMessage", params)

    def downvote_yak(self, message_id):
        params = {
            "userID": self.id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("downvoteMessage", params)

    def upvote_comment(self, comment_id):
        params = {
            "userID": self.id,
            "commentID": comment_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("likeComment", params)

    def downvote_comment(self, comment_id):
        params = {
            "userID": self.id,
            "commentID": comment_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("downvoteComment", params)

    def report_yak(self, message_id):
        # NOTE(review): redundant double assignment kept from the original.
        params = params = {
            "userID": self.id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("reportMessage", params)

    def delete_yak(self, message_id):
        # NOTE(review): redundant double assignment kept from the original.
        params = params = {
            "userID": self.id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("deleteMessage2", params)

    def report_comment(self, comment_id, message_id):
        params = {
            "userID": self.id,
            "commentID": comment_id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        # NOTE(review): posts to "reportMessage", same endpoint as
        # report_yak — confirm there is no dedicated comment endpoint.
        return self.get("reportMessage", params)

    def delete_comment(self, comment_id, message_id):
        params = {
            "userID": self.id,
            "commentID": comment_id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get("deleteComment", params)

    def get_greatest(self):
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get_yak_list("getGreatest", params)

    def get_my_tops(self):
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        topuseryaks = self.get_yak_list("getMyTops", params)
        # Most-liked first.
        topuseryaks.sort(key=lambda x: x.likes, reverse=True)
        return topuseryaks

    def get_recent_replied(self):
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get_yak_list("getMyRecentReplies", params)

    def update_location(self, location):
        self.location = location

    def get_my_recent_yaks(self):
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get_yak_list("getMyRecentYaks", params)

    def get_area_tops(self):
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        toplist = self.get_yak_list("getAreaTops", params)
        # Most-liked first.
        toplist.sort(key=lambda x: x.likes, reverse=True)
        return toplist

    def get_yaks(self):
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
            "version": self.version,
        }
        return self.get_yak_list("getMessages", params)

    def post_yak(self, message, showloc=False, handle=False):
        """Post a new yak; optionally attach the location pin and handle."""
        params = {
            "userID": self.id,
            "lat": self.location.latitude,
            "long": self.location.longitude,
            "message": message,
            "version": self.version,
        }
        if not showloc:
            params["hidePin"] = "1"
        if handle and (self.handle is not None):
            params["hndl"] = self.handle
        return self.post("sendMessage", params)

    def get_comments(self, message_id):
        params = {
            "userID": self.id,
            "messageID": message_id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.parse_comments(self.get("getComments", params).text, message_id)

    def post_comment(self, message_id, comment):
        params = {
            "userID": self.id,
            "messageID": message_id,
            "comment": comment,
            "lat": self.location.latitude,
            "long": self.location.longitude,
        }
        return self.post("postComment", params)

    def get_peek_locations(self):
        """Nearby 'other' peek-able locations from the feed response."""
        params = {
            "userID": self.id,
            "lat": self.location.latitude,
            "long": self.location.longitude,
        }
        data = self.get("getMessages", params).json()
        peeks = []
        for peek_json in data['otherLocations']:
            peeks.append(PeekLocation(peek_json))
        return peeks

    def get_featured_locations(self):
        """Featured peek-able locations from the feed response."""
        params = {
            "userID": self.id,
            "lat": self.location.latitude,
            "long": self.location.longitude,
        }
        data = self.get("getMessages", params).json()
        peeks = []
        for peek_json in data['featuredLocations']:
            peeks.append(PeekLocation(peek_json))
        return peeks

    def get_yakarma(self):
        """Return the user's karma score (int) from the feed response."""
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        data = self.get("getMessages", params).json()
        return int(data['yakarma'])

    def peek(self, peek_id):
        """Fetch yaks for a peek location (accepts id or PeekLocation)."""
        if isinstance(peek_id, PeekLocation):
            peek_id = peek_id.id
        params = {
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
            'peekID': peek_id,
        }
        return self.get_yak_list("getPeekMessages", params)

    def peekLoc(self, location):
        """Fetch yaks around an arbitrary coordinate."""
        params = {
            "lat": location.latitude,
            "long": location.longitude,
            "userID": self.id,
            "userLat": self.location.latitude,
            "userLong": self.location.longitude,
        }
        return self.get_yak_list("yaks", params)
|
kylefrost/yakyik
|
wipapi/ya.py
|
Python
|
gpl-3.0
| 17,475
|
[
"Galaxy"
] |
fd37a18eb35f14fc791367cbbffd1b14df093943d135a1c7f612423143c3aea5
|
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Storage back-end for Mercurial.
This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""
from __future__ import absolute_import
import collections
import errno
import os
import struct
import zlib
# import stuff from node for others to import from revlog
from .node import (
bin,
hex,
nullid,
nullrev,
)
from .i18n import _
from . import (
ancestor,
error,
mdiff,
parsers,
templatefilters,
util,
)
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1
# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_DEFAULT_FLAGS = 0
REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576
RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError
def getoffset(q):
    """Return the data-file offset stored in the upper bits of packed *q*."""
    offset = q >> 16
    return int(offset)
def gettype(q):
    """Return the 16-bit flag field stored in the low bits of packed *q*."""
    flags = q & 0xFFFF
    return int(flags)
def offset_type(offset, type):
    # Pack an offset and a 16-bit flag field into a single value; inverse of
    # getoffset()/gettype(). long() is used because offsets can exceed the
    # native int range on 32-bit Python 2 builds.
    return long(long(offset) << 16 | type)
_nullhash = _sha(nullid)  # precomputed hash state for the common null-parent case
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        # sorting makes the hash independent of parent order
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    # the first byte tags how the chunk was stored
    t = bin[0]
    if t == '\0':
        # chunk starting with NUL is stored literally
        return bin
    if t == 'x':
        # zlib streams begin with 'x'
        try:
            return _decompress(bin)
        except zlib.error as e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    if t == 'u':
        # explicit 'u' marker: uncompressed data, skip the marker byte
        return util.buffer(bin, 1)
    raise RevlogError(_("unknown compression type %r") % t)
# index v0:
# 4 bytes: offset
# 4 bytes: compressed length
# 4 bytes: base rev
# 4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = ">4l20s20s20s"
class revlogoldio(object):
    """Parser/packer for the original (version 0) revlog index format."""
    def __init__(self):
        # fixed byte size of one v0 index record
        self.size = struct.calcsize(indexformatv0)
    def parseindex(self, data, inline):
        """Parse raw v0 index *data* into (index, nodemap, chunkcache)."""
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1
        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))
        return index, nodemap, None
    def packentry(self, entry, node, version, rev):
        """Pack a revlogv1-style *entry* back into the v0 on-disk format."""
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
# index ng:
# 6 bytes: offset
# 2 bytes: flags
# 4 bytes: compressed length
# 4 bytes: uncompressed length
# 4 bytes: base rev
# 4 bytes: link rev
# 4 bytes: parent 1 rev
# 4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
versionformat = ">I"
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff
class revlogio(object):
    """Parser/packer for the RevlogNG (version 1) index format."""
    def __init__(self):
        # fixed byte size of one RevlogNG index record
        self.size = struct.calcsize(indexformatng)
    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache
    def packentry(self, entry, node, version, rev):
        """Pack *entry*; the first record also embeds the revlog version."""
        p = _pack(indexformatng, *entry)
        if rev == 0:
            # the version header overlays the (always zero) offset of rev 0
            p = _pack(versionformat, version) + p[4:]
        return p
class revlog(object):
"""
the underlying revision storage object
A revlog consists of two parts, an index and the revision data.
The index is a file with a fixed record size containing
information on each revision, including its nodeid (hash), the
nodeids of its parents, the position and offset of its data within
the data file, and the revision it's based on. Finally, each entry
contains a linkrev entry that can serve as a pointer to external
data.
The revision data itself is a linear collection of data chunks.
Each chunk represents a revision and is usually represented as a
delta against the previous chunk. To bound lookup time, runs of
deltas are limited to about 2 times the length of the original
version data. This makes retrieval of a version proportional to
its size, or O(1) relative to the number of revisions.
Both pieces of the revlog are written to in an append-only
fashion, which means we never need to rewrite a file to insert or
remove data, and can use some simple techniques to avoid the need
for locking while reading.
"""
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        # the data file shares the index file's name with a ".d" suffix
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # 2-tuple of (rev, baserev) defining the base revision the delta chain
        # begins at for a revision.
        self._basecache = None
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._aggressivemergedeltas = False
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        v = REVLOG_DEFAULT_VERSION
        # per-repository options on the opener may override the defaults
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'aggressivemergedeltas' in opts:
                self._aggressivemergedeltas = opts['aggressivemergedeltas']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
        # the chunk cache size must be a positive power of two so that the
        # offset masking in _loadchunk works
        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)
        indexdata = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            indexdata = f.read()
            f.close()
            if len(indexdata) > 0:
                # an existing index dictates the version to use
                v = struct.unpack(versionformat, indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            # a missing index file simply means an empty revlog
            if inst.errno != errno.ENOENT:
                raise
        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        # split the version field into feature flags and format number
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))
        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
    def tip(self):
        # index[-1] is the sentinel null entry, so the tip is at len - 2
        return self.node(len(self.index) - 2)
    def __contains__(self, rev):
        # membership is defined over the valid revision number range
        return 0 <= rev < len(self)
    def __len__(self):
        # exclude the sentinel null entry appended to the index
        return len(self.index) - 1
    def __iter__(self):
        # iterate revision numbers in ascending order
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all rev in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            # both bounds are inclusive; iterate backwards if start > stop
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)
    @util.propertycache
    def nodemap(self):
        # pure python fallback: force a lookup so the node cache is populated
        self.rev(self.node(0))
        return self._nodecache
    def hasnode(self, node):
        # a node exists iff it resolves to a revision number
        try:
            self.rev(node)
            return True
        except KeyError:
            return False
    def clearcaches(self):
        """Reset all in-memory caches to their pristine state."""
        self._cache = None
        self._basecache = None
        self._chunkcache = (0, '')
        self._pcache = {}
        try:
            # the C index object implements its own cache clearing
            self._nodecache.clearcaches()
        except AttributeError:
            # pure python fallback: start over with a fresh node cache
            self._nodecache = {nullid: nullrev}
            self._nodepos = None
    def rev(self, node):
        """Return the revision number for *node*; raise LookupError if absent."""
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            # scan backwards from the last uncached position, memoizing
            # every node seen along the way
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))
    def node(self, rev):
        # entry slot 7 holds the 20-byte node id
        return self.index[rev][7]
    def linkrev(self, rev):
        # entry slot 4 holds the link revision
        return self.index[rev][4]
    def parents(self, node):
        """Return the node ids of the two parents of *node*."""
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        # entry slots 5 and 6 hold the two parent revisions
        return self.index[rev][5:7]
    def start(self, rev):
        # the high bits of entry slot 0 hold the data-file offset
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        """Return the data-file offset one past the end of *rev*'s chunk."""
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        # entry slot 1 holds the compressed chunk length
        return self.index[rev][1]
def chainbase(self, rev):
index = self.index
base = index[rev][3]
while base != rev:
rev = base
base = index[rev][3]
return base
    def chainlen(self, rev):
        """Return the length of the delta chain for *rev*."""
        return self._chaininfo(rev)[0]
    def _chaininfo(self, rev):
        """Return (chain length, total compressed delta size) for *rev*.

        Results are memoized in self._chaininfocache.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # walk towards the chain base (a revision that is its own base)
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                # reuse a previously computed partial answer
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        chain = []
        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        # walk towards the base (or stoprev), collecting revisions
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        if iterrev == stoprev:
            stopped = True
        else:
            # the base itself is part of the chain
            chain.append(iterrev)
            stopped = False
        chain.reverse()
        return chain, stopped
    def flags(self, rev):
        # the low 16 bits of entry slot 0 hold the per-revision flags
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l
        # a negative stored length means "unknown"; measure the actual text
        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.

        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""
        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)
    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            # everything descends from the null revision
            for i in self:
                yield i
            return
        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    # a parent is a known descendant (or seed): i qualifies
                    seen.add(i)
                    yield i
                    break
def findcommonmissing(self, common=None, heads=None):
"""Return a tuple of the ancestors of common and the ancestors of heads
that are not ancestors of common. In revset terminology, we return the
tuple:
::common, (::heads) - (::common)
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of node IDs. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
common = [nullid]
if heads is None:
heads = self.heads()
common = [self.rev(n) for n in common]
heads = [self.rev(n) for n in heads]
# we want the ancestors, but inclusive
class lazyset(object):
def __init__(self, lazyvalues):
self.addedvalues = set()
self.lazyvalues = lazyvalues
def __contains__(self, value):
return value in self.addedvalues or value in self.lazyvalues
def __iter__(self):
added = self.addedvalues
for r in added:
yield r
for r in self.lazyvalues:
if not r in added:
yield r
def add(self, value):
self.addedvalues.add(value)
def update(self, values):
self.addedvalues.update(values)
has = lazyset(self.ancestors(common))
has.add(nullrev)
has.update(common)
# take all ancestors from heads that aren't in has
missing = set()
visit = collections.deque(r for r in heads if r not in has)
while visit:
r = visit.popleft()
if r in missing:
continue
else:
missing.add(r)
for p in self.parentrevs(r):
if p not in has:
visit.append(p)
missing = list(missing)
missing.sort()
return has, [self.node(r) for r in missing]
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            # default: everything is missing relative to the null revision
            common = [nullrev]
        return ancestor.incrementalmissingancestors(self.parentrevs, common)
    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()
        # delegate the graph walk to the incremental helper
        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)
    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()
        # translate nodes to revision numbers for the graph walk...
        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]
        inc = self.incrementalmissingrevs(common=common)
        # ...and map the resulting revisions back to nodes
        return [self.node(r) for r in inc.missingancestors(heads)]
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.
            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.
                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # keep only the heads that were actually reached from the roots
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
def headrevs(self):
try:
return self.index.headrevs()
except AttributeError:
return self._headrevs()
    def computephases(self, roots):
        # delegate phase computation to the C index implementation
        return self.index.computephasesmapsets(roots)
def _headrevs(self):
count = len(self)
if not count:
return [nullrev]
# we won't iter over filtered rev so nobody is a head at start
ishead = [0] * (count + 1)
index = self.index
for r in self:
ishead[r] = 1 # I may be an head
e = index[r]
ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
return [r for r, val in enumerate(ishead) if val]
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            # fast path: no filtering requested
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]
        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))
        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                    heads.add(r)
                if p in heads and p not in stoprevs:
                    # p has a reachable child, so it is no longer a head
                    heads.remove(p)
        return [self.node(r) for r in heads]
def children(self, node):
"""find the children of a given node"""
c = []
p = self.rev(node)
for r in self.revs(start=p + 1):
prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
if prevs:
for pr in prevs:
if pr == p:
c.append(self.node(r))
elif p == nullrev:
c.append(self.node(r))
return c
def descendant(self, start, end):
if start == nullrev:
return True
for i in self.descendants([start]):
if i == end:
return True
elif i > end:
break
return False
    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            # fall back to the pure python implementation
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return map(self.node, ancs)
    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)
    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            # fall back to the pure python implementation
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid
    def _match(self, id):
        """Try to resolve *id* as an exact reference; return None on failure.

        Accepts an integer revision, a 20-byte binary node, a decimal
        revision string, or a 40-character hex node.
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                # negative revisions count back from the tip
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
    def _partialmatch(self, id):
        """Resolve an unambiguous hex-prefix *id* to a node, else None.

        Raises LookupError when the prefix matches several nodes.
        """
        try:
            n = self.index.partialmatch(id)
            if n and self.hasnode(n):
                return n
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fall through to slow path that filters hidden revisions
            pass
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass
        if id in self._pcache:
            return self._pcache[id]
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        # cache the successful unique match
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass
def lookup(self, id):
"""locate a node based on:
- revision number or str(revision number)
- nodeid or subset of hex nodeid
"""
n = self._match(id)
if n is not None:
return n
n = self._partialmatch(id)
if n:
return n
raise LookupError(id, self.indexfile, _('no match found'))
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        # compare by hash instead of fetching and comparing the stored text
        return hash(text, p1, p2) != node
def _addchunk(self, offset, data):
"""Add a segment to the revlog cache.
Accepts an absolute offset and the data that is at that location.
"""
o, d = self._chunkcache
# try to add to existing cache
if o + len(d) == offset and len(d) + len(data) < _chunksize:
self._chunkcache = o, d + data
else:
self._chunkcache = offset, data
    def _loadchunk(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                # inline revlogs interleave data with the index file
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._addchunk(realoffset, d)
        if offset != realoffset or reallength != length:
            # hand back only the slice the caller actually asked for
            return util.buffer(d, offset - realoffset, length)
        return d
    def _getchunk(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, it's original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)
        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)
        # cache miss: read from disk (which also refreshes the cache)
        return self._loadchunk(offset, length, df=df)
    def _chunkraw(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        start = self.start(startrev)
        end = self.end(endrev)
        if self._inline:
            # account for the index entries interleaved with the data
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start
        return start, self._getchunk(start, length, df=df)
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        # [1] is the data half of the (offset, data) pair
        return decompress(self._chunkraw(rev, rev, df=df)[1])
    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        # local aliases to avoid repeated attribute lookups in the loop
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer
        l = []
        ladd = l.append
        try:
            # read the whole revision range in one pass
            offset, data = self._chunkraw(revs[0], revs[-1], df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revs]
        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
        return l
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        # (offset, data) pair; an empty string means nothing is cached
        self._chunkcache = (0, '')
def deltaparent(self, rev):
"""return deltaparent of the given revision"""
base = self.index[rev][3]
if base == rev:
return nullrev
elif self._generaldelta:
return base
else:
return rev - 1
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            # the stored chunk already is the delta we want
            return str(self._chunk(rev2))
        # otherwise compute a fresh textual diff
        return mdiff.textdiff(self.revision(rev1),
                              self.revision(rev2))
    def revision(self, nodeorrev, _df=None):
        """return an uncompressed revision of a given node or revision
        number.

        _df is an existing file handle to read from. It is meant to only be
        used internally.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            # remember the cached rev so the delta chain can stop there
            cachedrev = self._cache[1]
        # look up what we need to read
        text = None
        if rev is None:
            rev = self.rev(node)
        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            # the chain stopped at the cached revision: start from its text
            text = self._cache[2]
        # drop cache to save memory
        self._cache = None
        bins = self._chunks(chain, df=_df)
        if text is None:
            text = str(bins[0])
            bins = bins[1:]
        # apply the chain of deltas to reconstruct the revision text
        text = mdiff.patches(text, bins)
        text = self._checkhash(text, node, rev)
        self._cache = (node, rev, text)
        return text
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        # default implementation delegates to the module-level hash()
        return hash(text, p1, p2)
    def _checkhash(self, text, node, rev):
        # verify integrity before handing the text back to callers
        p1, p2 = self.parents(node)
        self.checkhash(text, p1, p2, node, rev)
        return text
    def checkhash(self, text, p1, p2, node, rev=None):
        """Raise RevlogError if *text* does not hash to *node*."""
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                # fall back to a short hex node for the error message
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                              % (self.indexfile, revornode))
    def checkinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.
        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)
        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)
        tr.add(self.datafile, dataoff)
        if fp:
            fp.flush()
            fp.close()
        # copy every revision's raw chunk out of the inline index into a
        # standalone data file
        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r)[1])
        finally:
            df.close()
        # rewrite the index without the inline flag (and without the data)
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)
        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()
        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
node=None):
"""add a revision to the log
text - the revision data to add
transaction - the transaction object used for rollback
link - the linkrev data to add
p1, p2 - the parent nodeids of the revision
cachedelta - an optional precomputed delta
node - nodeid of revision; typically node is not specified, and it is
computed by default as hash(text, p1, p2), however subclasses might
use different hashing method (and override checkhash() in such case)
"""
if link == nullrev:
raise RevlogError(_("attempted to add linkrev -1 to %s")
% self.indexfile)
if len(text) > _maxentrysize:
raise RevlogError(
_("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
% (self.indexfile, len(text)))
node = node or self.hash(text, p1, p2)
if node in self.nodemap:
return node
dfh = None
if not self._inline:
dfh = self.opener(self.datafile, "a+")
ifh = self.opener(self.indexfile, "a+")
try:
return self._addrevision(node, text, transaction, link, p1, p2,
REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
finally:
if dfh:
dfh.close()
ifh.close()
def compress(self, text):
""" generate a possibly-compressed representation of text """
if not text:
return ("", text)
l = len(text)
bin = None
if l < 44:
pass
elif l > 1000000:
# zlib makes an internal copy, thus doubling memory usage for
# large files, so lets do this in pieces
z = zlib.compressobj()
p = []
pos = 0
while pos < l:
pos2 = pos + 2**20
p.append(z.compress(text[pos:pos2]))
pos = pos2
p.append(z.flush())
if sum(map(len, p)) < l:
bin = "".join(p)
else:
bin = _compress(text)
if bin is None or len(bin) > l:
if text[0] == '\0':
return ("", text)
return ('u', text)
return ("", bin)
def _isgooddelta(self, d, textlen):
"""Returns True if the given delta is good. Good means that it is within
the disk span, disk size, and chain length bounds that we know to be
performant."""
if d is None:
return False
# - 'dist' is the distance from the base revision -- bounding it limits
# the amount of I/O we need to do.
# - 'compresseddeltalen' is the sum of the total size of deltas we need
# to apply -- bounding it limits the amount of CPU we consume.
dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
if (dist > textlen * 4 or l > textlen or
compresseddeltalen > textlen * 2 or
(self._maxchainlen and chainlen > self._maxchainlen)):
return False
return True
    def _addrevision(self, node, text, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False):
        """internal function to add revisions to the log
        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        # btext is a one-element list so the nested helpers below can
        # lazily compute and memoize the full text (py2 has no 'nonlocal')
        btext = [text]
        def buildtext():
            # resolve the full revision text, from the memo or by patching
            # the cached delta onto its base revision
            if btext[0] is not None:
                return btext[0]
            baserev = cachedelta[0]
            delta = cachedelta[1]
            # special case deltas which replace entire base; no need to decode
            # base revision. this neatly avoids censored bases, which throw when
            # they're decoded.
            hlen = struct.calcsize(">lll")
            if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
                                                       len(delta) - hlen):
                btext[0] = delta[hlen:]
            else:
                if self._inline:
                    fh = ifh
                else:
                    fh = dfh
                basetext = self.revision(self.node(baserev), _df=fh)
                btext[0] = mdiff.patch(basetext, delta)
            try:
                self.checkhash(btext[0], p1, p2, node)
                if flags & REVIDX_ISCENSORED:
                    raise RevlogError(_('node %s is not censored') % node)
            except CensoredNodeError:
                # must pass the censored index flag to add censored revisions
                if not flags & REVIDX_ISCENSORED:
                    raise
            return btext[0]
        def builddelta(rev):
            # build a candidate delta against revision ``rev``, returning the
            # tuple consumed by _isgooddelta()
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                if self.iscensored(rev):
                    # deltas based on a censored revision must replace the
                    # full content in one patch, so delta works everywhere
                    header = mdiff.replacediffheader(self.rawsize(rev), len(t))
                    delta = header + t
                else:
                    if self._inline:
                        fh = ifh
                    else:
                        fh = dfh
                    ptext = self.revision(self.node(rev), _df=fh)
                    delta = mdiff.textdiff(ptext, t)
            data = self.compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            chainlen, compresseddeltalen = self._chaininfo(rev)
            chainlen += 1
            compresseddeltalen += l
            return dist, l, data, base, chainbase, chainlen, compresseddeltalen
        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        delta = None
        if self._basecache is None:
            self._basecache = (prev, self.chainbase(prev))
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)
        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        # should we try to build a delta?
        if prev != nullrev:
            tested = set()
            if cachedelta and self._generaldelta and self._lazydeltabase:
                # Assume what we received from the server is a good choice
                # build delta will reuse the cache
                candidatedelta = builddelta(cachedelta[0])
                tested.add(cachedelta[0])
                if self._isgooddelta(candidatedelta, textlen):
                    delta = candidatedelta
            if delta is None and self._generaldelta:
                # exclude already lazy tested base if any
                parents = [p for p in (p1r, p2r)
                           if p != nullrev and p not in tested]
                if parents and not self._aggressivemergedeltas:
                    # Pick whichever parent is closer to us (to minimize the
                    # chance of having to build a fulltext).
                    parents = [max(parents)]
                tested.update(parents)
                pdeltas = []
                for p in parents:
                    pd = builddelta(p)
                    if self._isgooddelta(pd, textlen):
                        pdeltas.append(pd)
                if pdeltas:
                    # keep the smallest acceptable parent delta
                    delta = min(pdeltas, key=lambda x: x[1])
            if delta is None and prev not in tested:
                # other approach failed try against prev to hopefully save us a
                # fulltext.
                candidatedelta = builddelta(prev)
                if self._isgooddelta(candidatedelta, textlen):
                    delta = candidatedelta
        if delta is not None:
            dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
        else:
            # no acceptable delta found: store the full text
            text = buildtext()
            data = self.compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr
        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        # the in-memory index keeps a sentinel entry at the end; insert
        # the new record just before it
        self.index.insert(-1, e)
        self.nodemap[node] = curr
        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
        if alwayscache and text is None:
            text = buildtext()
        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        """Append one packed index entry and its revision data to disk.

        ``entry`` is the packed index record; ``data`` is the (header,
        body) pair returned by compress(); ``offset`` is where the
        revision's data starts in the data file (or inline stream).
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)
        curr = len(self) - 1
        if not self._inline:
            # separate index + data files: data chunks go to dfh
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            # inline revlog: entry and data are interleaved in the index
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            # converting to non-inline here keeps the file from growing
            # unboundedly with embedded data
            self.checkinlinesize(transaction, ifh)
    def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group
        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """
        # track the base of the current delta log
        content = []
        node = None
        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        # register the current file sizes with the transaction so a
        # rollback can truncate back to them
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a+")
        def flush():
            # push pending writes to disk so _peek_iscensored can read
            # back data that was appended during this group
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            # loop through our set of deltas
            chain = None
            while True:
                chunkdata = cg.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS
                content.append(node)
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))
                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))
                baserev = self.rev(deltabase)
                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))
                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED
                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, flags, (baserev, delta),
                                          ifh, dfh,
                                          alwayscache=bool(addrevisioncb))
                if addrevisioncb:
                    addrevisioncb(self, chain)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a+")
                    ifh = self.opener(self.indexfile, "a+")
        finally:
            if dfh:
                dfh.close()
            ifh.close()
        return content
    def iscensored(self, rev):
        """Check if a file revision is censored.

        This base implementation always reports False; storage types
        that support censorship override it.
        """
        return False
    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision.

        Base implementation always reports False; the unused arguments
        (``baserev``, ``delta``, ``flush``) are part of the hook
        interface used by overriding implementations.
        """
        return False
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev
        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)
        # map of candidate head rev -> its linkrev for the walk below
        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)
        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)
            if linkrev < minlink:
                # this rev survives the strip but a descendant doesn't:
                # its linkrev will be renumbered, i.e. broken
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)
            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)
        return strippoint, brokenrevs
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink
        This function is called when we're stripping revision minlink and
        its descendants from the repository.
        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.
        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return
        # NOTE(review): this rebinds the gettext alias '_' locally; harmless
        # here since no translated message follows, but worth knowing
        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            # nothing at or past minlink: nothing to strip
            return
        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            # inline: data is interleaved with entries, so the index
            # truncation point includes both entries and their data
            end += rev * self._io.size
        transaction.add(self.indexfile, end)
        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]
        # keep the sentinel entry at the end of the index (hence -1)
        del self.index[rev:-1]
    def checksize(self):
        """Compare on-disk file sizes with what the index expects.

        Returns (dd, di): surplus byte counts for the data file and the
        index file respectively; both are 0 for a consistent revlog, and
        a missing file is reported as 0 as well.
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))
        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # no data file at all (e.g. inline revlog): nothing surplus
            dd = 0
        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            # di is whatever doesn't divide evenly into whole entries
            di = actual - (i * s)
            if self._inline:
                # for inline revlogs, data is interleaved: account for the
                # per-revision chunks when computing the index surplus
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0
        return (dd, di)
def files(self):
res = [self.indexfile]
if not self._inline:
res.append(self.datafile)
return res
|
seewindcn/tortoisehg
|
src/mercurial/revlog.py
|
Python
|
gpl-2.0
| 63,606
|
[
"VisIt"
] |
dd665594f865517f8b4f646441d37fd033af117eb04973f6ea3852a4a3f36fb7
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A Gaussian Mixture Model clustering program using MLlib.
"""
from __future__ import print_function
import sys
if sys.version >= '3':
long = int
import random
import argparse
import numpy as np
from pyspark import SparkConf, SparkContext
from pyspark.mllib.clustering import GaussianMixture
def parseVector(line):
    """Parse one space-separated line of numbers into a numpy float array."""
    tokens = line.split(' ')
    values = [float(token) for token in tokens]
    return np.array(values)
if __name__ == "__main__":
    """
    Parameters
    ----------
    :param inputFile: Input file path which contains data points
    :param k: Number of mixture components
    :param convergenceTol: Convergence threshold. Default to 1e-3
    :param maxIterations: Number of EM iterations to perform. Default to 100
    :param seed: Random seed
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('inputFile', help='Input File')
    parser.add_argument('k', type=int, help='Number of clusters')
    parser.add_argument('--convergenceTol', default=1e-3, type=float, help='convergence threshold')
    parser.add_argument('--maxIterations', default=100, type=int, help='Number of iterations')
    # 'long' is aliased to int on Python 3 near the top of this file; the
    # default seed is a fresh random 19-bit value per run
    parser.add_argument('--seed', default=random.getrandbits(19),
                        type=long, help='Random seed')
    args = parser.parse_args()
    conf = SparkConf().setAppName("GMM")
    sc = SparkContext(conf=conf)
    # parse each input line into a numpy vector and fit the mixture model
    lines = sc.textFile(args.inputFile)
    data = lines.map(parseVector)
    model = GaussianMixture.train(data, args.k, args.convergenceTol,
                                  args.maxIterations, args.seed)
    # report the fitted weight, mean and covariance of each component
    for i in range(args.k):
        print(("weight = ", model.weights[i], "mu = ", model.gaussians[i].mu,
               "sigma = ", model.gaussians[i].sigma.toArray()))
    print("\n")
    print(("The membership value of each vector to all mixture components (first 100): ",
           model.predictSoft(data).take(100)))
    print("\n")
    print(("Cluster labels (first 100): ", model.predict(data).take(100)))
    sc.stop()
|
lhfei/spark-in-action
|
spark-2.x/src/main/python/mllib/gaussian_mixture_model.py
|
Python
|
apache-2.0
| 2,857
|
[
"Gaussian"
] |
dc481cbe2691393ab353989f951d424e4333436c65ce4cf4b29aee9f38c91bb5
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve the directory containing this script and make it the working
# directory so the relative paths below (manifest, assets, build/...) resolve.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)

# Manifest keys that must be present with non-empty values.
# BUGFIX: 'copyright' was listed twice; the duplicate was harmless to the
# validation loop but redundant, so each key now appears once.
required_module_keys = ['architectures', 'name', 'version', 'moduleid',
                        'description', 'copyright', 'license', 'platform',
                        'minsdk']

# Template placeholder values; validate_manifest() warns when these are
# still unchanged in the module's manifest.
module_defaults = {
	'description':'My module',
	'author': 'Your Name',
	'license' : 'Specify your license',
	'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}

# Default LICENSE text shipped by the module template; validate_license()
# warns if it is still present.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
	"""Return the Titanium SDK path from config, with env vars and ~ expanded."""
	raw = config['TITANIUM_SDK']
	return os.path.expandvars(os.path.expanduser(raw))
def replace_vars(config,token):
	"""Expand $(KEY) references in *token* using values from *config*.

	Expansion stops (returning the remaining text as-is) at the first
	unterminated '$(' or unknown key.
	"""
	idx = token.find('$(')
	while idx != -1:
		idx2 = token.find(')',idx+2)
		if idx2 == -1: break
		key = token[idx+2:idx2]
		# BUGFIX/modernization: dict.has_key() is deprecated in Python 2
		# and removed in Python 3; 'in' behaves identically on both.
		if key not in config: break
		token = token.replace('$(%s)' % key, config[key])
		idx = token.find('$(')
	return token
def read_ti_xcconfig():
	"""Parse titanium.xcconfig into a dict, expanding $(VAR) references
	against earlier entries as lines are read."""
	path = os.path.join(cwd,'titanium.xcconfig')
	contents = open(path).read()
	config = {}
	for raw in contents.splitlines(False):
		line = raw.strip()
		# skip // comment lines
		if line[0:2]=='//':
			continue
		eq = line.find('=')
		if eq <= 0:
			continue
		key = line[0:eq].strip()
		value = line[eq+1:].strip()
		config[key] = replace_vars(config,value)
	return config
def generate_doc(config):
	"""Convert the markdown files in the documentation directory to HTML.

	Returns a list of {filename: html} dicts, or None when no
	documentation directory exists. (``config`` is currently unused.)
	"""
	docdir = os.path.join(cwd,'documentation')
	if not os.path.exists(docdir):
		docdir = os.path.join(cwd,'..','documentation')
	if not os.path.exists(docdir):
		print "Couldn't find documentation file at: %s" % docdir
		return None
	# prefer markdown2 when installed; fall back to plain markdown
	try:
		import markdown2 as markdown
	except ImportError:
		import markdown
	documentation = []
	for file in os.listdir(docdir):
		# ignoreFiles is defined at module level further down this script
		if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
			continue
		md = open(os.path.join(docdir,file)).read()
		html = markdown.markdown(md)
		documentation.append({file:html});
	return documentation
def compile_js(manifest,config):
	"""Compile the module's CommonJS JS assets and splice the generated
	ObjC lookup snippets into the module assets router; also writes
	metadata.json describing the crawled JS exports.

	Silently returns when no bundled JS asset file exists.
	"""
	js_file = os.path.join(cwd,'assets','io.eventhero.tizebraprint.js')
	if not os.path.exists(js_file):
		js_file = os.path.join(cwd,'..','assets','io.eventhero.tizebraprint.js')
	if not os.path.exists(js_file): return
	from compiler import Compiler
	try:
		import json
	except:
		import simplejson as json
	compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
	root_asset, module_assets = compiler.compile_module()
	# Objective-C snippets spliced into IoEventheroTizebraprintModuleAssets.m
	root_asset_content = """
%s
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
	module_asset_content = """
%s
 NSNumber *index = [map objectForKey:path];
 if (index == nil) {
  return nil;
 }
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
	from tools import splice_code
	assets_router = os.path.join(cwd,'Classes','IoEventheroTizebraprintModuleAssets.m')
	splice_code(assets_router, 'asset', root_asset_content)
	splice_code(assets_router, 'resolve_asset', module_asset_content)
	# Generate the exports after crawling all of the available JS source
	exports = open('metadata.json','w')
	json.dump({'exports':compiler.exports }, exports)
	exports.close()
def die(msg):
	"""Print an error message and abort the build with exit status 1."""
	print msg
	sys.exit(1)
def warn(msg):
	"""Print a non-fatal [WARN]-prefixed message."""
	print "[WARN] %s" % msg
def validate_license():
	"""Warn when the LICENSE file still contains the template placeholder."""
	license_file = os.path.join(cwd,'LICENSE')
	if not os.path.exists(license_file):
		license_file = os.path.join(cwd,'..','LICENSE')
	if not os.path.exists(license_file):
		# no LICENSE file anywhere: nothing to check
		return
	text = open(license_file).read()
	if text.find(module_license_default)!=-1:
		warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	"""Parse and validate the module manifest.

	Returns (manifest_dict, path). Dies when the manifest is missing, a
	required key is absent, or a required value is empty; warns when a
	key still holds its template default.
	"""
	path = os.path.join(cwd,'manifest')
	# BUGFIX: check existence *before* opening; previously a missing file
	# raised IOError from open() before the friendly die() could run
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
	manifest = {}
	try:
		for line in f.readlines():
			line = line.strip()
			if line[0:1]=='#': continue
			if line.find(':') < 0: continue
			# BUGFIX: split on the first ':' only, so values containing
			# colons (e.g. URLs) don't raise ValueError
			key,value = line.split(':',1)
			manifest[key.strip()]=value.strip()
	finally:
		# explicitly release the file handle
		f.close()
	for key in required_module_keys:
		# dict.has_key() is deprecated; 'in' is equivalent on py2 and py3
		if key not in manifest: die("missing required manifest key '%s'" % key)
		if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
		if key in module_defaults:
			defvalue = module_defaults[key]
			curvalue = manifest[key]
			if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
	return manifest,path
# File and directory names excluded from documentation scans and packaging.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
	"""Recursively add *dir*'s files to zipfile *zf*, rebased under *basepath*.

	Skips ignored directories/files and .pyc files; .js files are skipped
	unless includeJSFiles is True. (NOTE: the ``ignore`` argument is
	accepted but unused, preserved for interface compatibility.)
	"""
	for root, dirs, files in os.walk(dir):
		# prune ignored directories in place so os.walk won't descend
		for skipped in ignoreDirs:
			if skipped in dirs:
				dirs.remove(skipped)
		for filename in files:
			if filename in ignoreFiles:
				continue
			ext = os.path.splitext(filename)[1]
			if ext == '.pyc':
				continue
			if ext == '.js' and not includeJSFiles:
				continue
			src = os.path.join(root, filename)
			dest = src.replace(dir, basepath, 1)
			zf.write(src, dest)
def glob_libfiles():
	"""Return the static archives from the Debug xcodebuild configurations."""
	# only archives whose path mentions a Debug- configuration directory
	return [lib for lib in glob.glob('build/**/*.a')
	        if lib.find('Debug-') != -1]
def build_module(manifest,config):
	"""Run xcodebuild for device and simulator (Debug configuration) and
	merge the resulting static libraries into build/lib<moduleid>.a.

	Dies when either xcodebuild invocation fails. (``config`` is
	currently unused.)
	"""
	from tools import ensure_dev_path
	ensure_dev_path()
	rc = os.system("xcodebuild -sdk iphoneos -configuration Debug")
	if rc != 0:
		die("xcodebuild failed")
	rc = os.system("xcodebuild -sdk iphonesimulator -configuration Debug")
	if rc != 0:
		die("xcodebuild failed")
	# build the merged library using lipo
	moduleid = manifest['moduleid']
	# space-separated list of input archives for the lipo command line
	libpaths = ''
	for libfile in glob_libfiles():
		libpaths+='%s ' % libfile
	os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
	"""Verify the built library's architectures match the manifest.

	Warns when arm64 is absent; dies when the manifest and binary
	architecture sets differ. (``config`` is currently unused.)
	"""
	binaryname = 'lib%s.a' % manifest['moduleid']
	binarypath = os.path.join('build', binaryname)
	manifestarch = set(manifest['architectures'].split(' '))
	# `lipo -info` output ends with ": <arch list>", parsed below
	output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
	builtarch = set(output.split(':')[-1].strip().split(' '))
	if ('arm64' not in builtarch):
		warn('built module is missing 64-bit support.')
	if (manifestarch != builtarch):
		warn('there is discrepancy between the architectures specified in module manifest and compiled binary.')
		warn('architectures in manifest: %s' % ', '.join(manifestarch))
		warn('compiled binary architectures: %s' % ', '.join(builtarch))
		die('please update manifest to match module binary architectures.')
def package_module(manifest,mf,config):
	"""Assemble the distributable <moduleid>-iphone-<version>.zip.

	Bundles the manifest, merged static library, generated HTML docs,
	assets, example/platform directories, LICENSE, module.xcconfig and
	the JS exports metadata under modules/iphone/<moduleid>/<version>/.
	"""
	name = manifest['name'].lower()
	moduleid = manifest['moduleid'].lower()
	version = manifest['version']
	modulezip = '%s-iphone-%s.zip' % (moduleid,version)
	# always start from a fresh archive
	if os.path.exists(modulezip): os.remove(modulezip)
	zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
	modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
	zf.write(mf,'%s/manifest' % modulepath)
	libname = 'lib%s.a' % moduleid
	zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
	docs = generate_doc(config)
	if docs!=None:
		for doc in docs:
			for file, html in doc.iteritems():
				# markdown sources are packaged as converted .html
				filename = string.replace(file,'.md','.html')
				zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
	# assets directory may live next to this script or one level up
	p = os.path.join(cwd, 'assets')
	if not os.path.exists(p):
		p = os.path.join(cwd, '..', 'assets')
	if os.path.exists(p):
		zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
	for dn in ('example','platform'):
		p = os.path.join(cwd, dn)
		if not os.path.exists(p):
			p = os.path.join(cwd, '..', dn)
		if os.path.exists(p):
			# JS files are included for example/platform contents
			zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
	license_file = os.path.join(cwd,'LICENSE')
	if not os.path.exists(license_file):
		license_file = os.path.join(cwd,'..','LICENSE')
	if os.path.exists(license_file):
		zf.write(license_file,'%s/LICENSE' % modulepath)
	zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
	exports_file = 'metadata.json'
	if os.path.exists(exports_file):
		zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
	zf.close()
if __name__ == '__main__':
	# full packaging pipeline: validate inputs, compile, build, verify, zip
	manifest,mf = validate_manifest()
	validate_license()
	config = read_ti_xcconfig()
	# make the Titanium SDK's python helper modules importable
	sdk = find_sdk(config)
	sys.path.insert(0,os.path.join(sdk,'iphone'))
	sys.path.append(os.path.join(sdk, "common"))
	compile_js(manifest,config)
	build_module(manifest,config)
	verify_build_arch(manifest, config)
	package_module(manifest,mf,config)
	sys.exit(0)
|
eventhero/TiZebraPrint
|
iphone/build_debug.py
|
Python
|
mit
| 8,516
|
[
"VisIt"
] |
02e50079b687327b3a7f216d8ee6b70c834246d252c04b6ed315f457a3f8a426
|
import json
import urllib.parse
import uuid
import pytest
from ... import core
from ... import exceptions
from ... import parse
def provider_url(config):
    """Build the test-provider start URL with *config* embedded as a
    percent-encoded ``configJSON`` query parameter."""
    payload = json.dumps(config, separators=(',', ':'))
    # quote (not quote_plus) so spaces become %20 rather than '+'
    qs = urllib.parse.urlencode({'configJSON': payload},
                                quote_via=urllib.parse.quote)
    base = ('https://rawgit.com/pelson/pyggybank/master/pyggybank/tests/'
            'test_provider/start.html')
    return '{}?{}'.format(base, qs)
class TestProvider(core.Provider):
    """A pyggybank provider backed by a static hosted test site.

    The provider config is serialised into the start-page URL (see
    provider_url) so the hosted pages know which flow to present.
    """
    # unique name per test run so provider registration never collides
    names = ['Test provider {}'.format(uuid.uuid4())]
    # credentials extracted from the user config by the schema
    _attributes = ['password']
    # page flow understood by the hosted test harness
    config = {"accounts": {
        "page": "accounts_1"
    },
        "balance": {
            "page": "balances_1"
        },
        "auth": [
            {
                "page": "login_1",
                "pass": "Basic password"
            }
        ]
    }
    domain = provider_url(config)
    def authenticate(self, browser, credentials):
        """Drive the hosted login flow; raise AuthenticationError on failure."""
        self.log.info('Visiting {}'.format(self.domain))
        browser.visit(self.domain)
        self.log.info("Clicking Let's go")
        button = browser.find_by_xpath('''//input[contains(@value, "Let's g")]''')
        button.first.click()
        self.log.info("Inserting password")
        browser.find_by_xpath("//input[@name='pass']").first.fill(credentials.password)
        self.log.info("Clicking Login")
        button = browser.find_by_xpath('''//input[contains(@value, "Login")]''')
        button.first.click()
        self.log.info("Checking for sucessful authentication")
        # If the next page has a Login button, we clearly aren't authenticated.
        button = browser.find_by_xpath('''//input[contains(@value, "Login")]''')
        if button:
            raise exceptions.AuthenticationError()
    def balances(self, browser):
        """Scrape the balances table into a list of account dicts."""
        self.log.info('Navigating to balances screen')
        balances = []
        # first <tr> is the header row, hence the [1:]
        accounts = list(browser.find_by_xpath("//tr"))[1:]
        for account in accounts:
            print(account.find_by_xpath('td'))
            id, name, acc_type, bal = [td.text for td in account.find_by_xpath('td')]
            bal = parse.parse_currency(bal, 'GBP')
            bal = {'id': id, 'name': name, 'amount': bal.float, 'currency': 'GBP', }
            balances.append(bal)
        return balances
@pytest.fixture(scope="module")
def browser():
    """Module-scoped splinter Chrome browser, quit after the module's tests."""
    from splinter import Browser
    browser = Browser('chrome')
    yield browser
    browser.quit()
def auth_provider(browser, config):
    """Authenticate a TestProvider against *browser* using *config*.

    Splits credentials out of the config via the provider schema and
    returns the initialized, authenticated provider instance; lets
    AuthenticationError propagate to the caller.
    """
    provider = TestProvider
    schema = provider.schema()
    config, credentials = schema.extract_credentials(config)
    p = provider.init_from_config(config)
    # (removed commented-out try/pdb debugging scaffold from the original)
    p.authenticate(browser, credentials)
    return p
@pytest.mark.BROWSER
def test_basic_auth(browser):
    """A correct password lands on the balances page."""
    config = {'password': 'Basic password'}
    p = auth_provider(browser, config)
    assert browser.is_text_present('This page is the simplest of balances in table form')
@pytest.mark.BROWSER
def test_invalid_auth(browser):
    """A wrong password must raise AuthenticationError."""
    with pytest.raises(exceptions.AuthenticationError):
        # fixed: the original bound the result to an unused local 'p'
        auth_provider(browser, {'password': 'Not correct'})
@pytest.mark.BROWSER
def test_balances(browser):
    """balances() parses the account table into GBP amount dicts."""
    config = {'password': 'Basic password'}
    p = auth_provider(browser, config)
    bal = p.balances(browser)
    # expected values mirror the static table on the hosted test page
    expected = [{'amount': -124.86, 'currency': 'GBP', 'id': '1', 'name': 'My first account'},
                {'amount': -198765.43, 'currency': 'GBP', 'id': '2', 'name': 'My mortgage'},
                {'amount': 1234.56, 'currency': 'GBP', 'id': '3', 'name': 'My first saver'}]
    assert bal == expected
|
pelson/pyggybank
|
pyggybank/tests/providers/test_testprovider.py
|
Python
|
bsd-3-clause
| 3,724
|
[
"VisIt"
] |
149b0abcce712c7813deb1398250f0718391402ec51706aab52020d8a286c61f
|
# $Id$
#
# Copyright (C) 2004-2012 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" uses pymol to interact with molecules
"""
from rdkit import Chem
import xmlrpclib,os,tempfile
# Module-level cache of the PyMol xmlrpc proxy so repeated MolViewer
# instances reuse one connection (unless force is passed to __init__).
_server=None
class MolViewer(object):
  def __init__(self,host=None,port=9123,force=0,**kwargs):
    """Connect to (or reuse) a PyMol xmlrpc server.

    host defaults to $PYMOL_RPCHOST (falling back to localhost); pass
    force=1 to bypass the cached module-level connection and reconnect.
    """
    global _server
    if not force and _server is not None:
      # reuse the connection cached by a previous MolViewer
      self.server=_server
    else:
      if not host:
        host=os.environ.get('PYMOL_RPCHOST','localhost')
      _server=None
      serv = xmlrpclib.Server('http://%s:%d'%(host,port))
      # ping raises if no PyMol rpc server is listening
      serv.ping()
      _server = serv
      self.server=serv
    self.InitializePyMol()
def InitializePyMol(self):
""" does some initializations to set up PyMol according to our
tastes
"""
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
  def DeleteAll(self):
    " blows out everything in the viewer "
    # delegates entirely to the rpc server's deleteAll
    self.server.deleteAll()
def DeleteAllExcept(self,excludes):
" deletes everything except the items in the provided list of arguments "
allNames = self.server.getNames('*',False)
for nm in allNames:
if nm not in excludes:
self.server.deleteObject(nm)
def LoadFile(self,filename,name,showOnly=False):
""" calls pymol's "load" command on the given filename; the loaded object
is assigned the name "name"
"""
if showOnly:
self.DeleteAll()
id = self.server.loadFile(filename,name)
return id
  def ShowMol(self,mol,name='molecule',showOnly=True,highlightFeatures=[],
              molB="",confId=-1,zoom=True):
    """ special case for displaying a molecule or mol block """
    # NOTE(review): highlightFeatures uses a mutable default argument;
    # safe only because the list is never mutated here
    if not molB:
      molB = Chem.MolToMolBlock(mol,confId=confId)
    server = self.server
    if not zoom:
      # remember the current camera view so it can be restored afterwards
      self.server.do('view rdinterface,store')
    if showOnly:
      self.DeleteAll()
    id = server.loadMolBlock(molB,name)
    if highlightFeatures:
      nm = name+'-features'
      conf = mol.GetConformer(confId)
      for feat in highlightFeatures:
        # place a small white sphere at the centroid of the feature's atoms
        pt = [0.0,0.0,0.0]
        for idx in feat:
          loc = conf.GetAtomPosition(idx)
          pt[0] += loc[0]/len(feat)
          pt[1] += loc[1]/len(feat)
          pt[2] += loc[2]/len(feat)
        server.sphere(pt,0.2,(1,1,1),nm)
    if zoom:
      server.zoom('visible')
    else:
      self.server.do('view rdinterface,recall')
    return id
def GetSelectedAtoms(self,whichSelection=None):
" returns the selected atoms "
if not whichSelection:
sels = self.server.getNames('selections')
if sels:
whichSelection = sels[-1]
else:
whichSelection=None
if whichSelection:
items = self.server.index(whichSelection)
else:
items = []
return items
def SelectAtoms(self,itemId,atomIndices,selName='selection'):
" selects a set of atoms "
ids = '(id '
ids += ','.join(['%d'%(x+1) for x in atomIndices])
ids += ')'
cmd = 'select %s,%s and %s'%(selName,ids,itemId)
self.server.do(cmd)
def HighlightAtoms(self, indices, where, extraHighlight=False):
    " highlights a set of atoms "
    if extraHighlight:
        # "edit" puts PyMOL's extra editing markers on each atom
        sel = ','.join('%s and (id %d)' % (where, anIdx) for anIdx in indices)
        self.server.do('edit %s' % sel)
    else:
        sel = ' or '.join('id %d' % anIdx for anIdx in indices)
        self.server.do('select selection, %s and (%s)' % (where, sel))
def SetDisplayStyle(self, obj, style=''):
    " change the display style of the specified object "
    # always hide everything first; then re-show in the requested style
    cmds = ['hide everything,%s' % (obj,)]
    if style:
        cmds.append('show %s,%s' % (style, obj))
    for cmd in cmds:
        self.server.do(cmd)
def SelectProteinNeighborhood(self, aroundObj, inObj, distance=5.0,
                              name='neighborhood', showSurface=False):
    """ selects the area of a protein around a specified object/selection name;
    optionally adds a surface to that """
    # explicit %-args instead of the original locals() trick
    self.server.do('select %s,byres (%s around %f) and %s' %
                   (name, aroundObj, distance, inObj))
    if showSurface:
        self.server.do('show surface,%s' % name)
        # the surface is created hidden; the caller enables it when wanted
        self.server.do('disable %s' % name)
def AddPharmacophore(self, locs, colors, label, sphereRad=0.5):
    " adds a set of spheres "
    # store the current view so drawing the spheres does not move the camera
    self.server.do('view rdinterface,store')
    # drop any existing CGO object with this label before drawing new spheres
    self.server.resetCGO(label)
    # one sphere per location; colors is indexed in parallel with locs
    for i, loc in enumerate(locs):
        self.server.sphere(loc, sphereRad, colors[i], label, 1)
    self.server.do('enable %s' % label)
    self.server.do('view rdinterface,recall')
def SetDisplayUpdate(self, val):
    # defer_update=1 suspends PyMOL redraws; 0 turns them back on
    self.server.do('set defer_update,%d' % (0 if val else 1))
def GetAtomCoords(self, sels):
    " returns the coordinates of the selected atoms "
    # sels is a sequence of (objectLabel, atomId) pairs; atom ids are 1-based
    return {(label, idx): self.server.getAtomCoords('(%s and id %d)' % (label, idx))
            for label, idx in sels}
def HideAll(self):
    # disable (hide) every object in the viewer without deleting anything
    self.server.do('disable all')
def HideObject(self, objName):
    # hide the named object (it stays loaded and can be re-enabled)
    self.server.do('disable %s' % objName)
def DisplayObject(self, objName):
    # re-enable (show) a previously hidden object
    self.server.do('enable %s' % objName)
def Redraw(self):
    # force PyMOL to refresh its display
    self.server.do('refresh')
def Zoom(self, objName):
    # zoom the camera onto the named object/selection
    self.server.zoom(objName)
def DisplayHBonds(self, objName, molName, proteinName,
                  molSelText='(%(molName)s)',
                  proteinSelText='(%(proteinName)s and not het)'):
    " toggles display of h bonds between the protein and a specified molecule "
    # mode=2 restricts the "dist" object to polar contacts (h-bonds)
    template = ';\n'.join([
        'delete %(objName)s',
        'dist %(objName)s,' + molSelText + ',' + proteinSelText + ',mode=2',
        'enable %(objName)s']) + ';'
    self.server.do(template % {'objName': objName, 'molName': molName,
                               'proteinName': proteinName})
def DisplayCollisions(self, objName, molName, proteinName, distCutoff=3.0,
                      color='red',
                      molSelText='(%(molName)s)',
                      proteinSelText='(%(proteinName)s and not het)'):
    " toggles display of collisions between the protein and a specified molecule "
    # mode=0 draws all pairwise distances below distCutoff
    template = ("delete %(objName)s;\n"
                "dist %(objName)s," + molSelText + "," + proteinSelText +
                ",%(distCutoff)f,mode=0;\n"
                "enable %(objName)s\n    color %(color)s, %(objName)s")
    self.server.do(template % {'objName': objName, 'molName': molName,
                               'proteinName': proteinName,
                               'distCutoff': distCutoff, 'color': color})
def GetPNG(self, h=None, w=None):
    """ grabs the current PyMOL view as a PNG and returns it as a PIL Image

    Arguments:
      h, w: optional maximum height/width in pixels; if the rendered image is
        larger it is scaled down preserving the aspect ratio

    Raises IOError if the file PyMOL wrote could not be read back.
    """
    try:
        from PIL import Image  # Pillow / modern PIL layout
    except ImportError:
        import Image  # legacy standalone PIL (Python 2 era)
    import time
    fd = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
    fname = fd.name
    fd.close()
    self.server.do('png %s' % fname)
    time.sleep(0.2)  # <- wait a short period so that PyMol can finish
    img = None
    for _ in range(10):
        try:
            img = Image.open(fname)
            break
        except IOError:
            # PyMOL may still be writing the file; retry briefly
            time.sleep(0.1)
    os.unlink(fname)
    if img is None:
        # the original fell through to a confusing NameError; fail explicitly
        raise IOError('could not read the image file generated by PyMOL')
    if h is not None or w is not None:
        sz = img.size
        if h is None:
            h = sz[1]
        if w is None:
            w = sz[0]
        # scale down along whichever axis is constrained, keeping aspect ratio
        if h < sz[1]:
            frac = float(h) / sz[1]
            w = int(w * frac)
            img = img.resize((w, h), True)
        elif w < sz[0]:
            frac = float(w) / sz[0]
            h = int(h * frac)
            img = img.resize((w, h), True)
    return img
|
rdkit/rdkit-orig
|
rdkit/Chem/PyMol.py
|
Python
|
bsd-3-clause
| 7,287
|
[
"PyMOL",
"RDKit"
] |
ee77bbe435148df19785500eec325bb91b75acf9b3d9235ed91ca3c1c44217a9
|
#!/usr/bin/env python
"""Refresh the DIRAC Configuration System (CS) from the master server."""
from DIRAC.Core.Base import Script

# parse the standard DIRAC command-line switches and initialize the
# configuration before importing anything that relies on it
Script.parseCommandLine()

from DIRAC.ConfigurationSystem.private.Refresher import gRefresher

res = gRefresher.forceRefresh()
if not res['OK']:
    # print() call instead of the Python-2-only print statement; with a
    # single argument this behaves identically under Python 2 as well
    print(res['Message'])
|
andresailer/DIRAC
|
tests/Jenkins/dirac-refresh-cs.py
|
Python
|
gpl-3.0
| 244
|
[
"DIRAC"
] |
17925f95abb467a7859a16d330b0e613f2fe3c00a50e9718f5e7f656cbfad97b
|
## \example atom/charmm_forcefield_verbose.py
# In this example, a PDB file is read in and scored using the CHARMM forcefield. It is similar to the 'charmm_forcefield.py' example, but fully works through each step of the procedure using lower-level IMP classes. This is useful if you want to customize the way in which the forcefield is applied.
#
from __future__ import print_function
import IMP.atom
import IMP.container
import sys
IMP.setup_from_argv(sys.argv, "CHARMM forcefield verbose")
# Create an IMP model and add a heavy atom-only protein from a PDB file
m = IMP.Model()
prot = IMP.atom.read_pdb(IMP.atom.get_example_path("example_protein.pdb"), m,
IMP.atom.NonWaterNonHydrogenPDBSelector())
# Read in the CHARMM heavy atom topology and parameter files
ff = IMP.atom.get_heavy_atom_CHARMM_parameters()
# Using the CHARMM libraries, determine the ideal topology (atoms and their
# connectivity) for the PDB file's primary sequence
topology = ff.create_topology(prot)
# Typically this modifies the C and N termini of each chain in the protein by
# applying the CHARMM CTER and NTER patches. Patches can also be manually
# applied at this point, e.g. to add disulfide bridges.
topology.apply_default_patches()
# Each atom is mapped to its CHARMM type. These are needed to look up bond
# lengths, Lennard-Jones radii etc. in the CHARMM parameter file. Atom types
# can also be manually assigned at this point using the CHARMMAtom decorator.
topology.add_atom_types(prot)
# Remove any atoms that are in the PDB file but not in the topology, and add
# in any that are in the topology but not the PDB.
IMP.atom.remove_charmm_untyped_atoms(prot)
topology.add_missing_atoms(prot)
# Construct Cartesian coordinates for any atoms that were added
topology.add_coordinates(prot)
# Generate and return lists of bonds, angles, dihedrals and impropers for
# the protein. Each is a Particle in the model which defines the 2, 3 or 4
# atoms that are bonded, and adds parameters such as ideal bond length
# and force constant. Note that bonds and impropers are explicitly listed
# in the CHARMM topology file, while angles and dihedrals are generated
# automatically from an existing set of bonds. These particles only define the
# bonds, but do not score them or exclude them from the nonbonded list.
bonds = topology.add_bonds(prot)
angles = ff.create_angles(bonds)
dihedrals = ff.create_dihedrals(bonds)
impropers = topology.add_impropers(prot)
# Maintain stereochemistry by scoring bonds, angles, dihedrals and impropers
# Score all of the bonds. This is done by combining IMP 'building blocks':
# - A ListSingletonContainer simply manages a list of the bond particles.
# - A BondSingletonScore, when given a bond particle, scores the bond by
# calculating the distance between the two atoms it bonds, subtracting the
# ideal value, and weighting the result by the bond's "stiffness", such that
# an "ideal" bond scores zero, and bonds away from equilibrium score non-zero.
# It then hands off to a UnaryFunction to actually penalize the value. In
# this case, a Harmonic UnaryFunction is used with a mean of zero, so that
# bond lengths are harmonically restrained.
# - A SingletonsRestraint simply goes through each of the bonds in the
# container and scores each one in turn.
cont = IMP.container.ListSingletonContainer(m, bonds, "bonds")
bss = IMP.atom.BondSingletonScore(IMP.core.Harmonic(0, 1))
r = IMP.container.SingletonsRestraint(bss, cont, "bonds")
rs = [r]
# Score angles, dihedrals, and impropers. In the CHARMM forcefield, angles and
# impropers are harmonically restrained, so this is the same as for bonds.
# Dihedrals are scored internally by a periodic (cosine) function.
cont = IMP.container.ListSingletonContainer(m, angles, "angles")
bss = IMP.atom.AngleSingletonScore(IMP.core.Harmonic(0, 1))
r = IMP.container.SingletonsRestraint(bss, cont, "angles")
rs.append(r)
cont = IMP.container.ListSingletonContainer(m, dihedrals, "dihedrals")
bss = IMP.atom.DihedralSingletonScore()
r = IMP.container.SingletonsRestraint(bss, cont, "dihedrals")
rs.append(r)
cont = IMP.container.ListSingletonContainer(m, impropers, "impropers")
bss = IMP.atom.ImproperSingletonScore(IMP.core.Harmonic(0, 1))
rs.append(IMP.container.SingletonsRestraint(bss, cont, "improppers"))
# Add non-bonded interaction (in this case, Lennard-Jones). This needs to
# know the radii and well depths for each atom, so add them from the forcefield
# (they can also be assigned manually using the XYZR or LennardJones
# decorators):
ff.add_radii(prot)
ff.add_well_depths(prot)
# Get a list of all atoms in the protein, and put it in a container
atoms = IMP.atom.get_by_type(prot, IMP.atom.ATOM_TYPE)
cont = IMP.container.ListSingletonContainer(m, atoms)
# Add a restraint for the Lennard-Jones interaction. Again, this is built from
# a collection of building blocks. First, a ClosePairContainer maintains a list
# of all pairs of Particles that are close. A StereochemistryPairFilter is used
# to exclude atoms from this list that are bonded to each other or are involved
# in an angle or dihedral (1-3 or 1-4 interaction). Then, a
# LennardJonesPairScore scores a pair of atoms with the Lennard-Jones potential.
# Finally, a PairsRestraint is used which simply applies the
# LennardJonesPairScore to each pair in the ClosePairContainer.
nbl = IMP.container.ClosePairContainer(cont, 4.0)
pair_filter = IMP.atom.StereochemistryPairFilter()
pair_filter.set_bonds(bonds)
pair_filter.set_angles(angles)
pair_filter.set_dihedrals(dihedrals)
nbl.add_pair_filter(pair_filter)
sf = IMP.atom.ForceSwitch(6.0, 7.0)
ps = IMP.atom.LennardJonesPairScore(sf)
rs.append(IMP.container.PairsRestraint(ps, nbl))
score_func = IMP.core.RestraintsScoringFunction(rs)
# it gets awfully slow with internal checks
IMP.set_check_level(IMP.USAGE)
# Finally, evaluate the score of the whole system (without derivatives)
print(score_func.evaluate(False))
|
shanot/imp
|
modules/atom/examples/charmm_forcefield_verbose.py
|
Python
|
gpl-3.0
| 5,965
|
[
"CHARMM"
] |
c35ee8dccf11d32267f0e2dfb98423b7e832556c0a44fe5c8ef050b0a8db68d2
|
from __future__ import unicode_literals
import re
from pig_util import outputSchema
positive_words = set([
"addicting", "addictingly", "admirable", "admirably", "admire", "admires", "admiring", "adorable",
"adorably", "adore", "adored", "adoring", "amaze", "amazed", "amazes", "amazing",
"angelic", "appeal", "appealed", "appealing", "appealingly", "appeals", "attentive", "attracted",
"attractive", "awesome", "awesomely", "beautiful", "beautifully", "best", "bliss", "bold",
"boldly", "boss", "bravo", "breath-taking", "breathtaking", "calm", "cared", "cares",
"caring", "celebrate", "celebrated", "celebrating", "charm", "charmed", "charming", "charmingly",
"cheer", "cheered", "cheerful", "cheerfully", "classic", "colorful", "colorfully", "colourful",
"colourfully", "comfort", "comfortably", "comforting", "comfortingly", "comfy", "competent", "competently",
"congrats", "congratulations", "considerate", "considerately", "cool", "coolest", "courteous", "courteously",
"creative", "creatively", "cute", "dapper", "dazzled", "dazzling", "dazzlingly", "delicious",
"deliciously", "delight", "delighted", "delightful", "delightfully", "dope", "dynamic", "ecstatic",
"efficient", "efficiently", "elegant", "elegantly", "eloquent", "embrace", "embraced", "embracing",
"energetic", "energetically", "engaging", "engagingly", "enjoy", "enjoyed", "enjoying", "enticing",
"enticingly", "essential", "excellent", "excellently", "exceptional", "excitement", "exciting", "excitingly",
"exquisite", "exquisitely", "fantastic", "fascinating", "fashionable", "fashionably", "fast", "favorite",
"favorites", "favourite", "favourites", "fetching", "fine", "flattering", "fond", "fondly",
"friendly", "fulfilling", "fun", "generous", "generously", "genius", "genuine", "glamor",
"glamorous", "glamorously", "glamour", "glamourous", "glamourously", "glorious", "good", "good-looking",
"goodlooking", "gorgeous", "gorgeously", "grace", "graceful", "gracefully", "great", "handsome",
"happiness", "happy", "healthy", "heartwarming", "heavenly", "helpful", "hip", "imaginative",
"incredible", "ingenious", "innovative", "inspirational", "inspired", "inspiring", "intelligent", "interesting",
"invigorating", "irresistible", "irresistibly", "joy", "kawaii", "keen", "knowledgeable", "liked",
"lively", "love", "loved", "lovely", "loving", "lucky", "luscious", "lusciously",
"magical", "magnificent", "marvelous", "marvelously", "masterful", "masterfully", "memorable", "mmm",
"mmmm", "mmmmm", "natural", "neat", "neatly", "nice", "nicely", "nifty",
"optimistic", "outstanding", "outstandingly", "overjoyed", "pampered", "peace", "peaceful", "phenomenal",
"pleasant", "pleasantly", "pleasurable", "pleasurably", "plentiful", "polished", "popular", "positive",
"powerful", "powerfully", "precious", "prettily", "pretty", "profound", "proud", "proudly",
"quick", "quickly", "rad", "radiant", "rejoice", "rejoiced", "rejoicing", "remarkable",
"respectable", "respectably", "respectful", "satisfied", "serenity", "sexily", "sexy", "shiny",
"skilled", "skillful", "slick", "smooth", "spectacular", "spicy", "splendid", "straightforward",
"stunning", "stylish", "stylishly", "sublime", "succulent", "super", "superb", "swell",
"tastily", "tasty", "terrific", "thorough", "thrilled", "thrilling", "tranquil", "tranquility",
"treat", "unreal", "vivacious", "vivid", "warm", "welcoming", "well-spoken", "win",
"wonderful", "wonderfully", "wow", "wowed", "wowing", "wows", "yummy"
])
negative_words = set([
"a-hole", "a-holes", "abandoned", "abandoning", "abuse", "abused", "abysmal", "aggressive",
"agonizing", "agonizingly", "agony", "ahole", "aholes", "alarming", "anger", "angering",
"angry", "appalled", "appalling", "appalls", "argue", "argued", "arguing", "ashamed",
"asinine", "asshole", "assholes", "atrocious", "awful", "awkward", "bad", "badgered",
"badgering", "banal", "bankrupt", "barbaric", "bastard", "bastards", "belittled", "belligerent",
"berated", "bigot", "bigoted", "bigots", "bitch", "bland", "bonkers", "boring",
"bossed-around", "bothered", "bothering", "bothers", "broke", "broken", "broken-hearted", "brokenhearted",
"brutal", "buggy", "bummed", "calamitous", "callous", "cheated", "cheating", "claustrophobic",
"clumsy", "colorless", "colourless", "conceited", "condescending", "confused", "confuses", "confusing",
"contentious", "corrupt", "coward", "cowardly", "cowards", "creeper", "crestfallen", "cringe-worthy",
"cringeworthy", "cruel", "cunt", "cunts", "cursed", "cynical", "d-bag", "d-bags",
"dbag", "dbags", "deal-breaker", "deal-breaking", "degrading", "dehumanized", "dehumanizing", "delay",
"delayed", "deplorable", "depressed", "despicable", "destroyed", "destroying", "destroys", "detestable",
"dick", "dicks", "died", "dirty", "disappointed", "disappointing", "disappoints", "disaster",
"disastrous", "disastrously", "disgruntled", "disgusted", "disgusting", "disgustingly", "dismal", "disorganized",
"disrespectful", "douche", "douchebag", "douchebags", "dour", "dreadful", "dull", "dumb",
"egocentric", "egotistical", "embarrassing", "enraging", "erred", "erring", "error", "excruciating",
"fail", "failed", "failing", "fails", "failure", "fake", "falsehood", "flaw",
"flawed", "flaws", "folly", "fool", "foolish", "fools", "forgettable", "fought",
"freaked", "freaking", "frustrated", "frustrating", "fubar", "fuck", "fuckers", "fugly",
"furious", "gaudy", "ghastly", "gloomy", "greed", "greedy", "grief", "grieve",
"grieved", "grieving", "grouchy", "hassle", "hate", "hated", "hating", "heart-breaking",
"heart-broken", "heartbreaking", "heartbroken", "hellish", "hellishly", "helpless", "horrendous", "horrible",
"horribly", "horrific", "horrifically", "humiliated", "humiliating", "hurt", "hurts", "icky",
"idiot", "idiotic", "ignorant", "ignored", "ill", "immature", "inane", "inattentive",
"incompetent", "incompetently", "incomplete", "inconsiderate", "incorrect", "indoctrinated", "inelegant", "infuriating",
"infuriatingly", "insecure", "insignificant", "insufficient", "insult", "insulted", "insulting", "interrupted",
"jaded", "kill", "lame", "loathsome", "lonely", "lose", "loser", "lost",
"mad", "mean", "mediocre", "melodramatic", "miserable", "miserably", "misery", "missing",
"mistake", "mistreated", "moron", "moronic", "mother-fucker", "mother-fuckers", "motherfucker", "motherfuckers",
"mourn", "mourned", "mugged", "nagging", "nasty", "nazi", "nazis", "negative",
"neurotic", "nonsense", "noo", "nooo", "nooooo", "nut-job", "nut-jobs", "nutjob",
"nutjobs", "objectification", "objectified", "objectifying", "obscene", "odious", "offended", "oppressive",
"over-sensitive", "pain", "painfully", "panic", "panicked", "panicking", "paranoid", "pathetic",
"pessimistic", "pestered", "pestering", "petty", "pissed", "poor", "poorly", "powerless",
"prejudiced", "pretentious", "psychopath", "psychopathic", "psychopaths", "psychotic", "quarrelling", "quarrelsome",
"racist", "rage", "repugnant", "repulsive", "resent", "resentful", "resenting", "retarded",
"revolting", "ridicule", "ridiculed", "ridicules", "robbed", "rude", "sad", "sadistic",
"sadness", "scared", "screwed", "self-centered", "selfcentered", "selfish", "shambolic", "shameful",
"shamefully", "shattered", "shit", "shitty", "shoddy", "sickening", "sloppily", "sloppy",
"slow", "slowly", "smothered", "snafu", "spiteful", "square", "squares", "stereotyped",
"stifled", "stressed", "stressful", "stressing", "stuck", "stuffy", "stupid", "sub-par",
"subpar", "substandard", "suck", "sucks", "suffer", "suffering", "suicide", "superficial",
"terrible", "terribly", "train-wreck", "trainwreck", "ugly", "unappealing", "unattractive", "uncomfortable",
"uncomfy", "unengaging", "unengagingly", "unenticing", "unenticingly", "unexceptionable", "unfair", "unfashionable",
"unfashionably", "unfriendly", "ungraceful", "ungrateful", "unhelpful", "unimpressive", "uninspired", "unjust",
"unlucky", "unnotable", "unpleasant", "unpleasantly", "unsatisfactory", "unsatisfied", "unseemly", "unwelcoming",
"upset", "vicious", "vindictive", "weak", "wreck", "wrecked", "wrecking", "wrecks",
"wtf", "yucky"
])
intensifier_words = set([
"absolutely", "amazingly", "exceptionally", "fantastically", "fucking", "incredibly", "obscenely", "phenomenally",
"profoundly", "really", "remarkably", "ridiculously", "so", "spectacularly", "stunningly", "such",
"totally", "unquestionably", "very"
])
negation_words = set([
"didn't", "don't", "lack", "lacked", "no-one", "nobody", "noone", "not", "wasn't",
])
# Decorator to help udf's handle null input like Pig does (just ignore it and return null)
# Decorator to help udf's handle null input like Pig does (just ignore it and
# return null)
def null_if_input_null(fn):
    """Decorator: return None without calling fn if any positional or keyword
    argument is None, mirroring Pig's null-propagation semantics."""
    from functools import wraps

    # functools.wraps replaces the hand-rolled copying of __name__, __doc__
    # and __dict__ and additionally preserves __module__, __qualname__ and
    # __wrapped__
    @wraps(fn)
    def wrapped(*args, **kwargs):
        if any(arg is None for arg in args):
            return None
        if any(value is None for value in kwargs.values()):
            return None
        return fn(*args, **kwargs)
    return wrapped
# Returns whether a word is the positive_words / negative_words sets defined in this library
# Pig 0.9.2 does not have a boolean datatype (this is implemented in Pig 0.10+), so we use 1 = true, 0 = false.
@outputSchema("in_word_set: int")
@null_if_input_null
def in_word_set(word, set_name):
    """Return 1 when word belongs to the named sentiment word set, 0 otherwise.

    Pig 0.9.2 has no boolean datatype (added in Pig 0.10+), hence the int
    encoding: 1 = true, 0 = false.
    """
    if set_name == 'positive':
        return 1 if word in positive_words else 0
    if set_name == 'negative':
        return 1 if word in negative_words else 0
    raise ValueError('Invalid set name. Should be "positive" or "negative".')
# Estimates whether an ordered bag of words expresses a positive (> 0) or negative (< 0) sentiment.
# Accounts for intensifier words (ex. "very") and negations (ex. "not"), but only if they
# directly precede a word expressing positive/negative sentiment
# (chains, ex. intensifier -> negation -> positive-word are handled)
@outputSchema("sentiment: double")
@null_if_input_null
def sentiment(words_bag):
    """Estimate whether an ordered bag of words expresses a positive (> 0) or
    negative (< 0) sentiment.

    Accounts for intensifier words (ex. "very") and negations (ex. "not"), but
    only if they directly precede a word expressing positive/negative
    sentiment (chains, ex. intensifier -> negation -> positive-word, are
    handled).
    """
    if len(words_bag) == 0:
        return 0.0
    # each bag entry is a tuple whose first field is the word itself
    words = [t[0] for t in words_bag if len(t) > 0]

    def _modified_score(idx, base_score):
        """Score of the sentiment word at idx, adjusted for the run of
        intensifiers/negations directly preceding it (factored out of the
        two previously-duplicated loops)."""
        word_score = base_score
        num_negations = 0
        i = idx - 1
        while i >= 0:
            if words[i] in intensifier_words:
                # NOTE(review): this also moves negative scores toward zero
                # ("very bad" scores -1 + 1 = 0); kept as-is to preserve the
                # original weighting -- confirm intent with the author.
                word_score += 1
            elif words[i] in negation_words:
                num_negations += 1
            else:
                break
            i -= 1
        if num_negations > 0:
            # BUGFIX: the original used `-0.5 ** num_negations`, which parses
            # as `-(0.5 ** n)` because ** binds tighter than unary minus, so
            # an even number of negations ("not not good") stayed negative.
            # `(-0.5) ** n` flips the sign once per negation as documented.
            word_score *= (-0.5) ** num_negations
        return word_score

    score = 0.0
    for idx in [i for i, word in enumerate(words) if word in positive_words]:
        score += _modified_score(idx, 1.0)
    for idx in [i for i, word in enumerate(words) if word in negative_words]:
        score += _modified_score(idx, -1.0)
    return score
|
pombredanne/jkarn-pub-test
|
udfs/jython/twitter_sentiment.py
|
Python
|
apache-2.0
| 11,150
|
[
"exciting"
] |
d414e07b1e913752dcc6c727592e45a46658a15b7aef0d741fda6ec2d55c24a6
|
"""
EX_TRISURF3D.PY
Material de apoio para o post "Gráficos tridimensionais no Python
[PARTE I]", no Programando Ciência.
Support material for the blog post "Three-dimensional plots on Python
[PART I]", on Programando Ciência.
* Autor/Author: Alexandre 'Jaguar' Fioravante de Siqueira
* Contato/Contact: http://www.programandociencia.com/sobre/
* Material de apoio/Support material:
http://www.github.com/alexandrejaguar/programandociencia
* Para citar esse material, por favor utilize a referência abaixo:
DE SIQUEIRA, Alexandre Fioravante. Gráficos tridimensionais no Python
[PARTE I]. Campinas: Programando Ciência, 28 de agosto de 2015.
Disponível em:
http://programandociencia.com/2015/08/28/
graficos-tridimensionais-no-python-parte-i-three-dimensional-plots-on-python-part-i/.
Acesso em: <DATA DE ACESSO>.
* In order to cite this material, please use the reference below
(this is a Chicago-like style):
de Siqueira, Alexandre Fioravante. “Three-dimensional plots on Python
[PART I]”. Programando Ciência. 2015, August 28. Available at
http://programandociencia.com/2015/08/28/
graficos-tridimensionais-no-python-parte-i-three-dimensional-plots-on-python-part-i/.
Access date: <ACCESS DATE>.
Copyright (C) Alexandre Fioravante de Siqueira
Este programa é um software livre; você pode redistribuí-lo e/ou
modificá-lo dentro dos termos da Licença Pública Geral GNU como publicada
pela Fundação do Software Livre (FSF); na versão 3 da Licença, ou qualquer
versão posterior.
Este programa é distribuído na esperança de que possa ser útil, mas SEM
NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a qualquer
MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a Licença Pública Geral GNU para
maiores detalhes.
Você deve ter recebido uma cópia da Licença Pública Geral GNU junto com
este programa. Se não, veja <http://www.gnu.org/licenses/>.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# importing Axes3D registers the '3d' projection with matplotlib
# (ajuda do / help from Welton Vaz: https://github.com/weltonvaz)
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
import matplotlib.pyplot as plt
import numpy as np

n_angles = 72
n_radii = 4

# An array of radii
# Does not include radius r=0, this is to eliminate duplicate points
radii = np.linspace(0.125, 1.0, n_radii)

# An array of angles
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=True)

# Repeat all angles for each radius
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)

# Convert polar (radii, angles) coords to cartesian (x, y) coords
# (0, 0) is added here. There are no duplicate points in the (x, y) plane
x = np.append(0, (radii*np.cos(angles)).flatten())
y = np.append(0, (radii*np.sin(angles)).flatten())

# Surface
z = np.sin(-x*(y**2)) + np.cos((x**2)*-y)

fig = plt.figure()
# Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection='3d')
ax.plot_trisurf(x, y, z, cmap='Oranges', linewidth=0.1)

plt.show()
|
alexandrejaguar/programandociencia
|
20150828-graf3dpython/ex_trisurf3d.py
|
Python
|
gpl-2.0
| 3,403
|
[
"Jaguar"
] |
6efc0e66516aa5a003b472106d9b2d485ca1b363f9ba8dfb6c91ef7ed29a3d4b
|
# Copyright 2004-2008 by M de Hoon.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Implements the Lowess function for nonparametric regression.
Functions:
lowess Fit a smooth nonparametric regression curve to a scatterplot.
For more information, see
William S. Cleveland: "Robust locally weighted regression and smoothing
scatterplots", Journal of the American Statistical Association, December 1979,
volume 74, number 368, pp. 829-836.
William S. Cleveland and Susan J. Devlin: "Locally weighted regression: An
approach to regression analysis by local fitting", Journal of the American
Statistical Association, September 1988, volume 83, number 403, pp. 596-610.
"""
# __future__ imports must be the first statements in a module (after the
# docstring); in the original this line came after `import sys`, which is a
# SyntaxError.
from __future__ import print_function

import sys

# Add path to Bio
sys.path.append('../..')

from Bio._py3k import range

import numpy

try:
    from Bio.Cluster import median
    # The function median in Bio.Cluster is faster than the function median
    # in NumPy, as it does not require a full sort.
except ImportError:
    # Use the median function in NumPy if Bio.Cluster is not available
    # (the unused `as x` binding of the exception was dropped)
    from numpy import median
def lowess(x, y, f=2. / 3., iter=3):
    """lowess(x, y, f=2./3., iter=3) -> yest

    Lowess smoother: Robust locally weighted regression.
    The lowess function fits a nonparametric regression curve to a scatterplot.
    The arrays x and y contain an equal number of elements; each pair
    (x[i], y[i]) defines a data point in the scatterplot. The function returns
    the estimated (smooth) values of y.

    The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by iter.
    The function will run faster with a smaller number of iterations.

    x and y should be numpy float arrays of equal length. The return value is
    also a numpy float array of that length.

    e.g.
    >>> import numpy
    >>> x = numpy.array([4, 4, 7, 7, 8, 9, 10, 10, 10, 11, 11, 12, 12, 12,
    ...                  12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16,
    ...                  17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 20, 20, 20, 20,
    ...                  20, 22, 23, 24, 24, 24, 24, 25], float)
    >>> y = numpy.array([2, 10, 4, 22, 16, 10, 18, 26, 34, 17, 28, 14, 20, 24,
    ...                  28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
    ...                  32, 40, 50, 42, 56, 76, 84, 36, 46, 68, 32, 48, 52, 56,
    ...                  64, 66, 54, 70, 92, 93, 120, 85], float)
    >>> result = lowess(x, y)
    >>> len(result)
    50
    >>> print("[%0.2f, ..., %0.2f]" % (result[0], result[-1]))
    [4.85, ..., 84.98]
    """
    # NOTE: the doctest previously used numpy.float, which was deprecated in
    # NumPy 1.20 and removed in 1.24; the builtin float is the documented
    # replacement and is equivalent here.
    n = len(x)
    r = int(numpy.ceil(f * n))  # number of neighbors used in each local fit
    # h[i]: distance from x[i] to its r-th nearest neighbor (local bandwidth)
    h = [numpy.sort(abs(x - x[i]))[r] for i in range(n)]
    # tricube weight function applied to the scaled pairwise distances
    w = numpy.clip(abs(([x] - numpy.transpose([x])) / h), 0.0, 1.0)
    w = 1 - w * w * w
    w = w * w * w
    yest = numpy.zeros(n)
    delta = numpy.ones(n)
    for iteration in range(iter):
        for i in range(n):
            # weighted linear least-squares fit around x[i], solved via the
            # explicit 2x2 normal equations
            weights = delta * w[:, i]
            weights_mul_x = weights * x
            b1 = numpy.dot(weights, y)
            b2 = numpy.dot(weights_mul_x, y)
            A11 = sum(weights)
            A12 = sum(weights_mul_x)
            A21 = A12
            A22 = numpy.dot(weights_mul_x, x)
            determinant = A11 * A22 - A12 * A21
            beta1 = (A22 * b1 - A12 * b2) / determinant
            beta2 = (A11 * b2 - A21 * b1) / determinant
            yest[i] = beta1 + beta2 * x[i]
        # robustifying step: bisquare-downweight points with large residuals
        # so outliers have less influence on the next iteration
        residuals = y - yest
        s = median(abs(residuals))
        delta[:] = numpy.clip(residuals / (6 * s), -1, 1)
        delta[:] = 1 - delta * delta
        delta[:] = delta * delta
    return yest
def _test():
    """Run the Bio.Statistics.lowess module's doctests."""
    print("Running doctests...")
    import doctest
    # executes the examples embedded in this module's docstrings
    doctest.testmod()
    print("Done")
if __name__ == "__main__":
    # running the module directly executes its doctests
    _test()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Statistics/lowess.py
|
Python
|
gpl-2.0
| 4,058
|
[
"Biopython"
] |
eeb929b8ee872917f1e0dad5088ad839caf2b469d5e7199329cb1b6bd00ab403
|
#
# Copyright (C) 2013-2022 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import espressomd.reaction_ensemble
import unittest as ut
class ReactionMethods(ut.TestCase):
"""Test the reaction methods interface."""
system = espressomd.System(box_l=[10., 10., 10.])
system.cell_system.skin = 0.4
def tearDown(self):
    # remove all particles so every test starts from an empty system
    self.system.part.clear()
def check_interface(self, method, kT, exclusion_radius, gamma):
    """Exercise the getters/setters shared by all reaction-method classes.

    NOTE(review): assumes `method` was constructed with the given kT and
    exclusion_radius (and constant_pH=10 for ConstantpHEnsemble) -- the
    assertions below compare against these values.
    """
    def check_reaction_parameters(reactions, parameters):
        # every required key of each reaction must match the values it was
        # created with
        for reaction, params in zip(reactions, parameters):
            for key in reaction.required_keys():
                self.assertEqual(getattr(reaction, key), params[key])

    # forward reaction: one particle of type 5 -> one of type 2 + one of type 3
    reaction_forward = {
        'gamma': gamma,
        'reactant_types': [5],
        'reactant_coefficients': [1],
        'product_types': [2, 3],
        'product_coefficients': [1, 1],
        'default_charges': {5: 0, 2: 0, 3: 0},
    }
    # the backward reaction is automatically registered as the mirror image
    # with the inverse reaction constant
    reaction_backward = {
        'gamma': 1. / gamma,
        'reactant_types': reaction_forward['product_types'],
        'reactant_coefficients': reaction_forward['product_coefficients'],
        'product_types': reaction_forward['reactant_types'],
        'product_coefficients': reaction_forward['reactant_coefficients'],
        'default_charges': reaction_forward['default_charges'],
    }
    if isinstance(method, espressomd.reaction_ensemble.ConstantpHEnsemble):
        # ConstantpHEnsemble.add_reaction does not take coefficient arguments
        method.add_reaction(gamma=reaction_forward['gamma'],
                            reactant_types=reaction_forward['reactant_types'],
                            product_types=reaction_forward['product_types'],
                            default_charges=reaction_forward['default_charges'])
    else:
        method.add_reaction(**reaction_forward)
    reaction_parameters = (reaction_forward, reaction_backward)

    # check getters and setters
    self.assertAlmostEqual(method.kT, kT, delta=1e-10)
    self.assertAlmostEqual(
        method.exclusion_radius,
        exclusion_radius,
        delta=1e-10)
    self.assertAlmostEqual(
        method.get_volume(),
        self.system.volume(),
        delta=1e-10)
    method.set_volume(volume=1.)
    self.assertAlmostEqual(method.get_volume(), 1., delta=1e-10)
    self.assertEqual(method.get_non_interacting_type(), 100)
    method.set_non_interacting_type(type=9)
    self.assertEqual(method.get_non_interacting_type(), 9)
    if isinstance(method, espressomd.reaction_ensemble.ConstantpHEnsemble):
        self.assertAlmostEqual(method.constant_pH, 10., delta=1e-10)
        method.constant_pH = 8.
        self.assertAlmostEqual(method.constant_pH, 8., delta=1e-10)

    # check constraints
    method.set_wall_constraints_in_z_direction(
        slab_start_z=0.1, slab_end_z=0.9)
    offsets = method.get_wall_constraints_in_z_direction()
    self.assertAlmostEqual(offsets[0], 0.1, delta=1e-10)
    self.assertAlmostEqual(offsets[1], 0.9, delta=1e-10)
    method.remove_constraint()

    # check status: the flattened view must agree with the inputs
    status = method.get_status()
    self.assertEqual(status['kT'], kT)
    self.assertEqual(status['exclusion_radius'], exclusion_radius)
    self.assertEqual(len(status['reactions']), 2)
    for reaction_flat, params in zip(
            status['reactions'], reaction_parameters):
        for key in reaction_flat:
            if key == 'gamma':
                self.assertAlmostEqual(
                    reaction_flat[key], params[key], delta=1e-10)
            else:
                self.assertEqual(reaction_flat[key], params[key])

    # check reactions
    reactions = method.reactions
    self.assertEqual(len(reactions), 2)
    check_reaction_parameters(method.reactions, reaction_parameters)

    # check reactions after parameter change: changing the forward constant
    # must also update the backward constant to its inverse
    new_gamma = 634.
    reaction_forward['gamma'] = new_gamma
    reaction_backward['gamma'] = 1. / new_gamma
    method.change_reaction_constant(reaction_id=0, gamma=new_gamma)
    check_reaction_parameters(method.reactions, reaction_parameters)
    status = method.get_status()
    self.assertAlmostEqual(
        status['reactions'][0]['gamma'],
        reaction_forward['gamma'],
        delta=1e-10)
    self.assertAlmostEqual(
        status['reactions'][1]['gamma'],
        reaction_backward['gamma'],
        delta=1e-10)

    # check particle deletion
    p1, _, p3 = self.system.part.add(
        pos=3 * [(0., 0., 0.)], type=[5, 2, 3])
    if isinstance(method, espressomd.reaction_ensemble.WidomInsertion):
        # particles don't interact here, so the insertion energy must vanish
        potential_energy = method.calculate_particle_insertion_potential_energy(
            reaction_id=0)
        self.assertEqual(potential_energy, 0.)
    method.delete_particle(p_id=p3.id)
    self.assertEqual(len(self.system.part), 2)
    method.delete_particle(p_id=p1.id)
    self.assertEqual(len(self.system.part), 1)
    self.system.part.clear()

    # check reaction deletion (removes both the forward and backward entries)
    method.delete_reaction(reaction_id=0)
    self.assertEqual(len(method.reactions), 0)
def test_interface(self):
# reaction ensemble
method = espressomd.reaction_ensemble.ReactionEnsemble(
kT=1.5, exclusion_radius=0.8, seed=12)
self.check_interface(method, kT=1.5, exclusion_radius=0.8, gamma=1.2)
# constant pH ensemble
method = espressomd.reaction_ensemble.ConstantpHEnsemble(
kT=1.5, exclusion_radius=0.8, seed=12, constant_pH=10)
self.check_interface(method, kT=1.5, exclusion_radius=0.8, gamma=1.2)
# Widom insertion
method = espressomd.reaction_ensemble.WidomInsertion(kT=1.6, seed=12)
self.check_interface(method, kT=1.6, exclusion_radius=0., gamma=1.)
    def test_exceptions(self):
        """Check the error paths of the reaction-method interface.

        Exercises invalid reaction definitions, charge-conservation
        checks, invalid reaction ids, constraint setup errors, missing
        particles, and invalid constructor arguments.
        """
        single_reaction_params = {
            'gamma': 1.,
            'reactant_types': [4],
            'reactant_coefficients': [1],
            'product_types': [2, 3],
            'product_coefficients': [1, 4],
        }
        reaction_params = {
            'default_charges': {2: 0, 3: 0, 4: 0},
            **single_reaction_params
        }
        widom = espressomd.reaction_ensemble.WidomInsertion(kT=1., seed=12)
        method = espressomd.reaction_ensemble.ReactionEnsemble(
            kT=1.5, exclusion_radius=0.8, seed=12)
        method.add_reaction(**reaction_params)
        widom.add_reaction(**reaction_params)
        # check invalid reactions
        err_msg = 'number of types and coefficients have to match'
        with self.assertRaisesRegex(ValueError, f'reactants: {err_msg}'):
            method.add_reaction(**{**reaction_params, 'reactant_types': []})
        with self.assertRaisesRegex(ValueError, f'products: {err_msg}'):
            method.add_reaction(**{**reaction_params, 'product_types': []})
        # check charge conservation
        err_msg = 'Reaction system is not charge neutral'
        with self.assertRaisesRegex(ValueError, err_msg):
            method.add_reaction(default_charges={2: 8, 3: 0, 4: -50},
                                **single_reaction_params)
        # a tiny charge imbalance (1e-10) must also be rejected
        with self.assertRaisesRegex(ValueError, err_msg):
            method.add_reaction(default_charges={2: 1, 3: 0, 4: 1 + 1e-10},
                                **single_reaction_params)
        # check invalid reaction id exceptions
        # (note: reactions id = 2 * reactions index)
        self.assertEqual(len(method.reactions), 2)
        for i in [-2, -1, 1, 2, 3]:
            with self.assertRaisesRegex(IndexError, 'This reaction is not present'):
                method.delete_reaction(reaction_id=i)
            with self.assertRaisesRegex(IndexError, 'This reaction is not present'):
                method.get_acceptance_rate_reaction(reaction_id=2 * i)
        # check constraint exceptions
        set_cyl_constraint = method.set_cylindrical_constraint_in_z_direction
        set_slab_constraint = method.set_wall_constraints_in_z_direction
        get_slab_constraint = method.get_wall_constraints_in_z_direction
        err_msg = "no slab constraint is currently active"
        # querying before a slab constraint exists must fail
        with self.assertRaisesRegex(RuntimeError, err_msg):
            get_slab_constraint()
        set_slab_constraint(slab_start_z=0.1, slab_end_z=0.9)
        method.remove_constraint()
        # querying after removal must fail again
        with self.assertRaisesRegex(RuntimeError, err_msg):
            get_slab_constraint()
        # check invalid constraints
        with self.assertRaisesRegex(ValueError, "center_x is outside the box"):
            set_cyl_constraint(center_x=100., center_y=1., radius=1.)
        with self.assertRaisesRegex(ValueError, "center_x is outside the box"):
            set_cyl_constraint(center_x=-10., center_y=1., radius=1.)
        with self.assertRaisesRegex(ValueError, "center_y is outside the box"):
            set_cyl_constraint(center_y=100., center_x=1., radius=1.)
        with self.assertRaisesRegex(ValueError, "center_y is outside the box"):
            set_cyl_constraint(center_y=-10., center_x=1., radius=1.)
        with self.assertRaisesRegex(ValueError, "radius is invalid"):
            set_cyl_constraint(center_x=1., center_y=1., radius=-1.)
        with self.assertRaisesRegex(ValueError, "slab_start_z is outside the box"):
            set_slab_constraint(slab_start_z=100., slab_end_z=1.)
        with self.assertRaisesRegex(ValueError, "slab_start_z is outside the box"):
            set_slab_constraint(slab_start_z=-10., slab_end_z=1.)
        with self.assertRaisesRegex(ValueError, "slab_end_z is outside the box"):
            set_slab_constraint(slab_end_z=100., slab_start_z=1.)
        with self.assertRaisesRegex(ValueError, "slab_end_z is outside the box"):
            set_slab_constraint(slab_end_z=-10., slab_start_z=1.)
        with self.assertRaisesRegex(ValueError, "slab_end_z must be >= slab_start_z"):
            set_slab_constraint(slab_start_z=10., slab_end_z=1.)
        # check exceptions for missing particles
        with self.assertRaisesRegex(RuntimeError, "Particle id is greater than the max seen particle id"):
            method.delete_particle(p_id=0)
        with self.assertRaisesRegex(RuntimeError, "Trying to remove some non-existing particles from the system via the inverse Widom scheme"):
            widom.calculate_particle_insertion_potential_energy(reaction_id=0)
        # check other exceptions
        with self.assertRaisesRegex(ValueError, "Invalid value for 'volume'"):
            method.set_volume(volume=-10.)
        with self.assertRaisesRegex(RuntimeError, r"unknown method 'unknown\(\)'"):
            method.call_method('unknown', x=1)
        # unknown keyword arguments are rejected by every constructor
        err_msg = r"Only the following keys can be given as keyword arguments: \[.+\], got \[.+\] \(unknown \['x'\]\)"
        with self.assertRaisesRegex(ValueError, err_msg):
            espressomd.reaction_ensemble.SingleReaction(
                x=1, **single_reaction_params)
        with self.assertRaisesRegex(ValueError, err_msg):
            espressomd.reaction_ensemble.ReactionEnsemble(
                kT=1., exclusion_radius=1., seed=12, x=1)
        with self.assertRaisesRegex(ValueError, err_msg):
            espressomd.reaction_ensemble.ConstantpHEnsemble(
                kT=1., exclusion_radius=1., seed=12, x=1, constant_pH=2)
        with self.assertRaisesRegex(ValueError, err_msg):
            espressomd.reaction_ensemble.WidomInsertion(
                kT=1., seed=12, x=1)
        with self.assertRaisesRegex(ValueError, "Invalid value for 'kT'"):
            espressomd.reaction_ensemble.ReactionEnsemble(
                kT=-1., exclusion_radius=1., seed=12)
        with self.assertRaisesRegex(ValueError, "Invalid value for 'exclusion_radius'"):
            espressomd.reaction_ensemble.ReactionEnsemble(
                kT=1., exclusion_radius=-1., seed=12)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/python/reaction_methods.py
|
Python
|
gpl-3.0
| 12,747
|
[
"ESPResSo"
] |
de7fc1df215ea610b8485abd11c3fe72de3bfaad82f8ed85fb47d9756e8ce5f8
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
import itertools
import platform
import sys
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
import pytest
from pytest import raises as assert_raises
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, assert_allclose, assert_array_almost_equal_nulp,
suppress_warnings)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipe, ellipk, ellipkm1
from scipy.special import elliprc, elliprd, elliprf, elliprg, elliprj
from scipy.special import mathieu_odd_coef, mathieu_even_coef
from scipy.special._testutils import with_special_errors, \
assert_func_equal, FuncData
import math
class TestCephes:
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
    def test_binom(self):
        """Spot-check cephes.binom against precomputed reference values,
        including negative and non-integer arguments."""
        n = np.array([0.264, 4, 5.2, 17])
        k = np.array([2, 0.4, 7, 3.3])
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
            -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
            [10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
            [136, 3.5252179590758828, 19448, 1024.5526916174495]])
        assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
        # Test branches in implementation: a tiny relative perturbation of k
        # must not change the result beyond the stated tolerance.
        np.random.seed(1234)
        n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
        k = np.arange(0, 102)
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        assert_func_equal(cephes.binom,
                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
                          nk,
                          atol=1e-10, rtol=1e-10)
    def test_binom_2(self):
        """Continuity check on very large n (up to 1e300) — same tiny-k
        perturbation strategy as in test_binom."""
        # Test branches in implementation
        np.random.seed(1234)
        n = np.r_[np.logspace(1, 300, 20)]
        k = np.arange(0, 102)
        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
                      ).reshape(2, -1).T
        assert_func_equal(cephes.binom,
                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
                          nk,
                          atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
    def test_binom_nooverflow_8346(self):
        """Regression test for gh-8346: binom(n, n/2) for n around 1000
        must not overflow prematurely; results stay finite up to ~1e308."""
        # Test (binom(n, k) doesn't overflow prematurely */
        dataset = [
            (1000, 500, 2.70288240945436551e+299),
            (1002, 501, 1.08007396880791225e+300),
            (1004, 502, 4.31599279169058121e+300),
            (1006, 503, 1.72468101616263781e+301),
            (1008, 504, 6.89188009236419153e+301),
            (1010, 505, 2.75402257948335448e+302),
            (1012, 506, 1.10052048531923757e+303),
            (1014, 507, 4.39774063758732849e+303),
            (1016, 508, 1.75736486108312519e+304),
            (1018, 509, 7.02255427788423734e+304),
            (1020, 510, 2.80626776829962255e+305),
            (1022, 511, 1.12140876377061240e+306),
            (1024, 512, 4.48125455209897109e+306),
            (1026, 513, 1.79075474304149900e+307),
            (1028, 514, 7.15605105487789676e+307)
        ]
        dataset = np.asarray(dataset)
        FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check()
    # The following are point checks at arguments where the exact value is
    # known; calls without an assert are smoke tests only (no value check).
    def test_bdtr(self):
        assert_equal(cephes.bdtr(1,1,0.5),1.0)
    def test_bdtri(self):
        assert_equal(cephes.bdtri(1,3,0.5),0.5)
    def test_bdtrc(self):
        assert_equal(cephes.bdtrc(1,3,0.5),0.5)
    def test_bdtrin(self):
        assert_equal(cephes.bdtrin(1,0,1),5.0)
    def test_bdtrik(self):
        # smoke test: only checks that the call does not raise
        cephes.bdtrik(1,3,0.5)
    def test_bei(self):
        assert_equal(cephes.bei(0),0.0)
    def test_beip(self):
        assert_equal(cephes.beip(0),0.0)
    def test_ber(self):
        assert_equal(cephes.ber(0),1.0)
    def test_berp(self):
        assert_equal(cephes.berp(0),0.0)
    def test_besselpoly(self):
        assert_equal(cephes.besselpoly(0,0,0),1.0)
    def test_beta(self):
        assert_equal(cephes.beta(1,1),1.0)
        # pins beta(-100.3, y) ~ gamma(y) for tiny y, and a small-x value
        assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
        assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
                        rtol=1e-13, atol=0)
    def test_betainc(self):
        assert_equal(cephes.betainc(1,1,1),1.0)
        assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
    def test_betaln(self):
        assert_equal(cephes.betaln(1,1),0.0)
        assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
        assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
                        rtol=1e-14, atol=0)
    def test_betaincinv(self):
        assert_equal(cephes.betaincinv(1,1,1),1.0)
        assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
                        8.4231316935498957e-21, rtol=3e-12, atol=0)
    def test_beta_inf(self):
        # beta is infinite at a non-positive integer argument
        assert_(np.isinf(special.beta(-1, 2)))
    def test_btdtr(self):
        assert_equal(cephes.btdtr(1,1,1),1.0)
    def test_btdtri(self):
        assert_equal(cephes.btdtri(1,1,1),1.0)
    def test_btdtria(self):
        assert_equal(cephes.btdtria(1,1,1),5.0)
    def test_btdtrib(self):
        assert_equal(cephes.btdtrib(1,1,1),5.0)
    def test_cbrt(self):
        assert_approx_equal(cephes.cbrt(1),1.0)
    def test_chdtr(self):
        assert_equal(cephes.chdtr(1,0),0.0)
    def test_chdtrc(self):
        assert_equal(cephes.chdtrc(1,0),1.0)
    def test_chdtri(self):
        assert_equal(cephes.chdtri(1,1),0.0)
    def test_chdtriv(self):
        assert_equal(cephes.chdtriv(0,0),5.0)
    def test_chndtr(self):
        """Noncentral chi-square CDF: reference values plus inf/nan edges."""
        assert_equal(cephes.chndtr(0,1,0),0.0)
        # Each row holds (x, nu, lam, expected_value)
        # These values were computed using Wolfram Alpha with
        #     CDF[NoncentralChiSquareDistribution[nu, lam], x]
        values = np.array([
            [25.00, 20.0, 400, 4.1210655112396197139e-57],
            [25.00, 8.00, 250, 2.3988026526832425878e-29],
            [0.001, 8.00, 40., 5.3761806201366039084e-24],
            [0.010, 8.00, 40., 5.45396231055999457039e-20],
            [20.00, 2.00, 107, 1.39390743555819597802e-9],
            [22.50, 2.00, 107, 7.11803307138105870671e-9],
            [25.00, 2.00, 107, 3.11041244829864897313e-8],
            [3.000, 2.00, 1.0, 0.62064365321954362734],
            [350.0, 300., 10., 0.93880128006276407710],
            [100.0, 13.5, 10., 0.99999999650104210949],
            [700.0, 20.0, 400, 0.99999999925680650105],
            [150.0, 13.5, 10., 0.99999999999999983046],
            [160.0, 13.5, 10., 0.99999999999999999518], # 1.0
        ])
        cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2])
        assert_allclose(cdf, values[:, 3], rtol=1e-12)
        # NOTE(review): 2.0 is surprising for a CDF; this pins the current
        # behavior of chndtr at (inf, inf, 0), not a mathematical identity.
        assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
        assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
        # nan in any argument propagates to the result
        assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
        assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
        assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
    def test_chndtridf(self):
        assert_equal(cephes.chndtridf(0,0,1),5.0)
    def test_chndtrinc(self):
        assert_equal(cephes.chndtrinc(0,1,0),5.0)
    def test_chndtrix(self):
        assert_equal(cephes.chndtrix(0,1,0),0.0)
    def test_cosdg(self):
        assert_equal(cephes.cosdg(0),1.0)
    def test_cosm1(self):
        assert_equal(cephes.cosm1(0),0.0)
    def test_cotdg(self):
        assert_almost_equal(cephes.cotdg(45),1.0)
    def test_dawsn(self):
        assert_equal(cephes.dawsn(0),0.0)
        assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
    def test_diric(self):
        """Periodic sinc: near multiples of 2*pi the value must approach
        +1 for odd n and -1 for even n."""
        # Test behavior near multiples of 2pi. Regression test for issue
        # described in gh-4001.
        n_odd = [1, 5, 25]
        x = np.array(2*np.pi + 5e-5).astype(np.float32)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
        x = np.array(2*np.pi + 1e-9).astype(np.float64)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
        x = np.array(2*np.pi + 1e-15).astype(np.float64)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
        if hasattr(np, 'float128'):
            # No float128 available in 32-bit numpy
            x = np.array(2*np.pi + 1e-12).astype(np.float128)
            assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
        n_even = [2, 4, 24]
        x = np.array(2*np.pi + 1e-9).astype(np.float64)
        assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
        # Test at some values not near a multiple of pi
        x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
        octave_result = [0.872677996249965, 0.539344662916632,
                         0.127322003750035, -0.206011329583298]
        assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
    def test_diric_broadcasting(self):
        # a column vector of x against a row of n broadcasts to a 2-D result
        x = np.arange(5)
        n = np.array([1, 3, 7])
        assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
    # Elliptic integrals, error functions and exponentials at known points.
    def test_ellipe(self):
        assert_equal(cephes.ellipe(1),1.0)
    def test_ellipeinc(self):
        assert_equal(cephes.ellipeinc(0,1),0.0)
    def test_ellipj(self):
        # smoke test: only checks that the call does not raise
        cephes.ellipj(0,1)
    def test_ellipk(self):
        assert_allclose(ellipk(0), pi/2)
    def test_ellipkinc(self):
        assert_equal(cephes.ellipkinc(0,0),0.0)
    def test_erf(self):
        assert_equal(cephes.erf(0), 0.0)
    def test_erf_symmetry(self):
        # erf is odd: erf(x) + erf(-x) == 0 exactly
        x = 5.905732037710919
        assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0)
    def test_erfc(self):
        assert_equal(cephes.erfc(0), 1.0)
    def test_exp10(self):
        assert_approx_equal(cephes.exp10(2),100.0)
    def test_exp2(self):
        assert_equal(cephes.exp2(2),4.0)
    def test_expm1(self):
        assert_equal(cephes.expm1(0),0.0)
        # limits: expm1(inf)=inf, expm1(-inf)=-1, nan propagates
        assert_equal(cephes.expm1(np.inf), np.inf)
        assert_equal(cephes.expm1(-np.inf), -1)
        assert_equal(cephes.expm1(np.nan), np.nan)
    def test_expm1_complex(self):
        """C99-style special-value semantics of complex expm1 for inf/nan
        components; the quadrant of the infinite part follows cos/sin of
        the imaginary component."""
        expm1 = cephes.expm1
        assert_equal(expm1(0 + 0j), 0 + 0j)
        assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
        assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
        assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
        assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
        assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
        assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
        assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
        assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
        assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
        assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
        assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
    @pytest.mark.xfail(reason='The real part of expm1(z) bad at these points')
    def test_expm1_complex_hard(self):
        """Known-bad accuracy case, kept as an xfail regression marker."""
        # The real part of this function is difficult to evaluate when
        # z.real = -log(cos(z.imag)).
        y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
        x = -np.log(np.cos(y))
        z = x + 1j*y
        # evaluate using mpmath.expm1 with dps=1000
        expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
                             2.4289354732893695e-18+0.20271003550867248j,
                             4.5235500262585768e-17+0.30933624960962319j,
                             7.8234305217489006e-17-3.3805150062465863j,
                             -1.3685191953697676e-16-225.95084645419513j,
                             8.7175620481291045e-17+2.2371609442247422j])
        found = cephes.expm1(z)
        # this passes.
        assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
        # this fails.
        assert_array_almost_equal_nulp(found.real, expected.real, 20)
    # F-distribution, Fresnel and gamma-family functions.
    def test_fdtr(self):
        assert_equal(cephes.fdtr(1, 1, 0), 0.0)
        # Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10]
        assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488,
                        rtol=1e-12)
    def test_fdtrc(self):
        assert_equal(cephes.fdtrc(1, 1, 0), 1.0)
        # Computed using Wolfram Alpha:
        #   1 - CDF[FRatioDistribution[2, 1/10], 1e10]
        assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512,
                        rtol=1e-12)
    def test_fdtri(self):
        assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
                        array([0.9937365, 1.00630298]), rtol=1e-6)
        # From Wolfram Alpha:
        #   CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874...
        p = 0.8756751669632105666874
        assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12)
    @pytest.mark.xfail(reason='Returns nan on i686.')
    def test_fdtri_mysterious_failure(self):
        # platform-dependent failure, kept as an xfail marker
        assert_allclose(cephes.fdtri(1, 1, 0.5), 1)
    def test_fdtridfd(self):
        assert_equal(cephes.fdtridfd(1,0,0),5.0)
    def test_fresnel(self):
        assert_equal(cephes.fresnel(0),(0.0,0.0))
    def test_gamma(self):
        assert_equal(cephes.gamma(5),24.0)
    def test_gammainccinv(self):
        assert_equal(cephes.gammainccinv(5,1),0.0)
    def test_gammaln(self):
        # smoke test: only checks that the call does not raise
        cephes.gammaln(10)
    def test_gammasgn(self):
        # gammasgn must match the sign of 1/gamma on these points
        vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
        assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
    def test_gdtr(self):
        assert_equal(cephes.gdtr(1,1,0),0.0)
    def test_gdtr_inf(self):
        assert_equal(cephes.gdtr(1,1,np.inf),1.0)
    def test_gdtrc(self):
        assert_equal(cephes.gdtrc(1,1,0),1.0)
    def test_gdtria(self):
        assert_equal(cephes.gdtria(0,1,1),0.0)
    def test_gdtrib(self):
        # smoke test; the exact-value assertion is kept disabled below
        cephes.gdtrib(1,0,1)
        # assert_equal(cephes.gdtrib(1,0,1),5.0)
    def test_gdtrix(self):
        # smoke test: only checks that the call does not raise
        cephes.gdtrix(1,1,.1)
    # Hankel, hypergeometric and Bessel functions.  Methods with a
    # leading underscore (e.g. _check_ive) are deliberately not collected
    # by the test runner; calls without an assert are smoke tests only.
    def test_hankel1(self):
        cephes.hankel1(1,1)
    def test_hankel1e(self):
        cephes.hankel1e(1,1)
    def test_hankel2(self):
        cephes.hankel2(1,1)
    def test_hankel2e(self):
        cephes.hankel2e(1,1)
    def test_hyp1f1(self):
        # 1F1(1;1;z) = exp(z)
        assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
        assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
        cephes.hyp1f1(1,1,1)
    def test_hyp2f1(self):
        assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
    def test_i0(self):
        assert_equal(cephes.i0(0),1.0)
    def test_i0e(self):
        assert_equal(cephes.i0e(0),1.0)
    def test_i1(self):
        assert_equal(cephes.i1(0),0.0)
    def test_i1e(self):
        assert_equal(cephes.i1e(0),0.0)
    def test_it2i0k0(self):
        cephes.it2i0k0(1)
    def test_it2j0y0(self):
        cephes.it2j0y0(1)
    def test_it2struve0(self):
        cephes.it2struve0(1)
    def test_itairy(self):
        cephes.itairy(1)
    def test_iti0k0(self):
        assert_equal(cephes.iti0k0(0),(0.0,0.0))
    def test_itj0y0(self):
        assert_equal(cephes.itj0y0(0),(0.0,0.0))
    def test_itmodstruve0(self):
        assert_equal(cephes.itmodstruve0(0),0.0)
    def test_itstruve0(self):
        assert_equal(cephes.itstruve0(0),0.0)
    def test_iv(self):
        assert_equal(cephes.iv(1,0),0.0)
    def _check_ive(self):
        assert_equal(cephes.ive(1,0),0.0)
    def test_j0(self):
        assert_equal(cephes.j0(0),1.0)
    def test_j1(self):
        assert_equal(cephes.j1(0),0.0)
    def test_jn(self):
        assert_equal(cephes.jn(0,0),1.0)
    def test_jv(self):
        assert_equal(cephes.jv(0,0),1.0)
    def _check_jve(self):
        assert_equal(cephes.jve(0,0),1.0)
    # Modified Bessel K, Kelvin and Kolmogorov functions.
    def test_k0(self):
        cephes.k0(2)
    def test_k0e(self):
        cephes.k0e(2)
    def test_k1(self):
        cephes.k1(2)
    def test_k1e(self):
        cephes.k1e(2)
    def test_kei(self):
        cephes.kei(2)
    def test_keip(self):
        assert_equal(cephes.keip(0),0.0)
    def test_ker(self):
        cephes.ker(2)
    def test_kerp(self):
        cephes.kerp(2)
    def _check_kelvin(self):
        cephes.kelvin(2)
    def test_kn(self):
        cephes.kn(1,1)
    def test_kolmogi(self):
        assert_equal(cephes.kolmogi(1),0.0)
        # nan input propagates
        assert_(np.isnan(cephes.kolmogi(np.nan)))
    def test_kolmogorov(self):
        assert_equal(cephes.kolmogorov(0), 1.0)
    def test_kolmogp(self):
        assert_equal(cephes._kolmogp(0), -0.0)
    def test_kolmogc(self):
        assert_equal(cephes._kolmogc(0), 0.0)
    def test_kolmogci(self):
        assert_equal(cephes._kolmogci(0), 0.0)
        assert_(np.isnan(cephes._kolmogci(np.nan)))
    def _check_kv(self):
        cephes.kv(1,1)
    def _check_kve(self):
        cephes.kve(1,1)
    def test_log1p(self):
        """Real log1p: zero, pole at -1, domain error below -1, inf limit."""
        log1p = cephes.log1p
        assert_equal(log1p(0), 0.0)
        assert_equal(log1p(-1), -np.inf)
        # outside the domain -> nan
        assert_equal(log1p(-2), np.nan)
        assert_equal(log1p(np.inf), np.inf)
    def test_log1p_complex(self):
        """Complex log1p special values for inf/nan components (imaginary
        part is the branch angle of 1+z)."""
        log1p = cephes.log1p
        c = complex
        assert_equal(log1p(0 + 0j), 0 + 0j)
        assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
            assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
            assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
            assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
            assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
            assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
            assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
            assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
            assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
            assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
            assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
    def test_lpmv(self):
        assert_equal(cephes.lpmv(0,0,1),1.0)
    def test_mathieu_a(self):
        assert_equal(cephes.mathieu_a(1,0),1.0)
    def test_mathieu_b(self):
        assert_equal(cephes.mathieu_b(1,0),1.0)
    def test_mathieu_cem(self):
        """Even Mathieu functions; checked against the small-q expansion
        of Abramowitz & Stegun 20.2.27 (to first order in q)."""
        assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
        # Test AMS 20.2.27
        @np.vectorize
        def ce_smallq(m, q, z):
            z *= np.pi/180  # z is given in degrees
            if m == 0:
                return 2**(-0.5) * (1 - .5*q*cos(2*z))  # + O(q^2)
            elif m == 1:
                return cos(z) - q/8 * cos(3*z)  # + O(q^2)
            elif m == 2:
                return cos(2*z) - q*(cos(4*z)/12 - 1/4)  # + O(q^2)
            else:
                return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1)))  # + O(q^2)
        m = np.arange(0, 100)
        q = np.r_[0, np.logspace(-30, -9, 10)]
        assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
                        ce_smallq(m[:,None], q[None,:], 0.123),
                        rtol=1e-14, atol=0)
    def test_mathieu_sem(self):
        """Odd Mathieu functions; same small-q expansion check (AMS 20.2.27)."""
        assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
        # Test AMS 20.2.27
        @np.vectorize
        def se_smallq(m, q, z):
            z *= np.pi/180  # z is given in degrees
            if m == 1:
                return sin(z) - q/8 * sin(3*z)  # + O(q^2)
            elif m == 2:
                return sin(2*z) - q*sin(4*z)/12  # + O(q^2)
            else:
                return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1)))  # + O(q^2)
        m = np.arange(1, 100)
        q = np.r_[0, np.logspace(-30, -9, 10)]
        assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
                        se_smallq(m[:,None], q[None,:], 0.123),
                        rtol=1e-14, atol=0)
    def test_mathieu_modcem1(self):
        assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
    def test_mathieu_modcem2(self):
        cephes.mathieu_modcem2(1,1,1)
        # Test reflection relation AMS 20.6.19
        m = np.arange(0, 4)[:,None,None]
        q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
        z = np.linspace(0, 1, 7)[None,None,:]
        y1 = cephes.mathieu_modcem2(m, q, -z)[0]
        fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
        y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
        assert_allclose(y1, y2, rtol=1e-10)
    def test_mathieu_modsem1(self):
        assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
    def test_mathieu_modsem2(self):
        cephes.mathieu_modsem2(1,1,1)
        # Test reflection relation AMS 20.6.20
        m = np.arange(1, 4)[:,None,None]
        q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
        z = np.linspace(0, 1, 7)[None,None,:]
        y1 = cephes.mathieu_modsem2(m, q, -z)[0]
        fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
        y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
        assert_allclose(y1, y2, rtol=1e-10)
    def test_mathieu_overflow(self):
        # Check that these return NaNs instead of causing a SEGV
        assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
        assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
    def test_mathieu_ticket_1847(self):
        # Regression test --- this call had some out-of-bounds access
        # and could return nan occasionally
        for k in range(60):
            v = cephes.mathieu_modsem2(2, 100, -1)
            # Values from ACM TOMS 804 (derivate by numerical differentiation)
            assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
            assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
    # Modified Fresnel integrals and negative-binomial / noncentral
    # F / noncentral t distribution functions.  Several of the inverse
    # functions are checked by a round trip through the forward function.
    def test_modfresnelm(self):
        cephes.modfresnelm(0)
    def test_modfresnelp(self):
        cephes.modfresnelp(0)
    def _check_modstruve(self):
        assert_equal(cephes.modstruve(1,0),0.0)
    def test_nbdtr(self):
        assert_equal(cephes.nbdtr(1,1,1),1.0)
    def test_nbdtrc(self):
        assert_equal(cephes.nbdtrc(1,1,1),0.0)
    def test_nbdtri(self):
        assert_equal(cephes.nbdtri(1,1,1),1.0)
    def __check_nbdtrik(self):
        cephes.nbdtrik(1,.4,.5)
    def test_nbdtrin(self):
        assert_equal(cephes.nbdtrin(1,0,0),5.0)
    def test_ncfdtr(self):
        assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
    def test_ncfdtri(self):
        assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0)
        # round trip: ncfdtri(ncfdtr(f)) == f
        f = [0.5, 1, 1.5]
        p = cephes.ncfdtr(2, 3, 1.5, f)
        assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f)
    def test_ncfdtridfd(self):
        dfd = [1, 2, 3]
        p = cephes.ncfdtr(2, dfd, 0.25, 15)
        assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd)
    def test_ncfdtridfn(self):
        dfn = [0.1, 1, 2, 3, 1e4]
        p = cephes.ncfdtr(dfn, 2, 0.25, 15)
        assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5)
    def test_ncfdtrinc(self):
        nc = [0.5, 1.5, 2.0]
        p = cephes.ncfdtr(2, 3, nc, 15)
        assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc)
    def test_nctdtr(self):
        assert_equal(cephes.nctdtr(1,0,0),0.5)
        assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
        assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
        assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
        assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
        # nan in any argument propagates
        assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
        assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
        assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
    def __check_nctdtridf(self):
        cephes.nctdtridf(1,0.5,0)
    def test_nctdtrinc(self):
        cephes.nctdtrinc(1,0,0)
    def test_nctdtrit(self):
        cephes.nctdtrit(.1,0.2,.5)
    def test_nrdtrimn(self):
        assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
    def test_nrdtrisd(self):
        assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
                        atol=0, rtol=0)
    # Oblate/prolate spheroidal wave functions, parabolic cylinder
    # functions and Poisson distribution functions.  Calls without an
    # assert are smoke tests only.
    def test_obl_ang1(self):
        cephes.obl_ang1(1,1,1,0)
    def test_obl_ang1_cv(self):
        result = cephes.obl_ang1_cv(1,1,1,1,0)
        assert_almost_equal(result[0],1.0)
        assert_almost_equal(result[1],0.0)
    def _check_obl_cv(self):
        assert_equal(cephes.obl_cv(1,1,0),2.0)
    def test_obl_rad1(self):
        cephes.obl_rad1(1,1,1,0)
    def test_obl_rad1_cv(self):
        cephes.obl_rad1_cv(1,1,1,1,0)
    def test_obl_rad2(self):
        cephes.obl_rad2(1,1,1,0)
    def test_obl_rad2_cv(self):
        cephes.obl_rad2_cv(1,1,1,1,0)
    def test_pbdv(self):
        assert_equal(cephes.pbdv(1,0),(0.0,1.0))
    def test_pbvv(self):
        cephes.pbvv(1,0)
    def test_pbwa(self):
        cephes.pbwa(1,0)
    def test_pdtr(self):
        # Poisson CDF at k=0, m=1 is exp(-1)
        val = cephes.pdtr(0, 1)
        assert_almost_equal(val, np.exp(-1))
        # Edge case: m = 0.
        val = cephes.pdtr([0, 1, 2], 0)
        assert_array_equal(val, [1, 1, 1])
    def test_pdtrc(self):
        val = cephes.pdtrc(0, 1)
        assert_almost_equal(val, 1 - np.exp(-1))
        # Edge case: m = 0.
        val = cephes.pdtrc([0, 1, 2], 0.0)
        assert_array_equal(val, [0, 0, 0])
    def test_pdtri(self):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "floating point number truncated to an integer")
            cephes.pdtri(0.5,0.5)
    def test_pdtrik(self):
        # round trip through the regularized upper incomplete gamma
        k = cephes.pdtrik(0.5, 1)
        assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
        # Edge case: m = 0 or very small.
        k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
        assert_array_equal(k, np.zeros((3, 3)))
    def test_pro_ang1(self):
        cephes.pro_ang1(1,1,1,0)
    def test_pro_ang1_cv(self):
        assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
                                  array((1.0,0.0)))
    def _check_pro_cv(self):
        assert_equal(cephes.pro_cv(1,1,0),2.0)
    def test_pro_rad1(self):
        cephes.pro_rad1(1,1,1,0.1)
    def test_pro_rad1_cv(self):
        cephes.pro_rad1_cv(1,1,1,1,0)
    def test_pro_rad2(self):
        cephes.pro_rad2(1,1,1,0)
    def test_pro_rad2_cv(self):
        cephes.pro_rad2_cv(1,1,1,1,0)
    def test_psi(self):
        cephes.psi(1)
    def test_radian(self):
        assert_equal(cephes.radian(0,0,0),0)
    def test_rgamma(self):
        assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
    # Trigonometric integrals, Smirnov/Kolmogorov statistics and Student t.
    def test_shichi(self):
        cephes.shichi(1)
    def test_sici(self):
        cephes.sici(1)
        # limits of the sine/cosine integrals at +/- infinity
        s, c = cephes.sici(np.inf)
        assert_almost_equal(s, np.pi * 0.5)
        assert_almost_equal(c, 0)
        s, c = cephes.sici(-np.inf)
        assert_almost_equal(s, -np.pi * 0.5)
        assert_(np.isnan(c), "cosine integral(-inf) is not nan")
    def test_sindg(self):
        assert_equal(cephes.sindg(90),1.0)
    def test_smirnov(self):
        assert_equal(cephes.smirnov(1,.1),0.9)
        assert_(np.isnan(cephes.smirnov(1,np.nan)))
    def test_smirnovp(self):
        # derivative of the Smirnov distribution at known points
        assert_equal(cephes._smirnovp(1, .1), -1)
        assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1))
        assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1))
        assert_(np.isnan(cephes._smirnovp(1, np.nan)))
    def test_smirnovc(self):
        # complement: _smirnovc == 1 - smirnov
        assert_equal(cephes._smirnovc(1,.1),0.1)
        assert_(np.isnan(cephes._smirnovc(1,np.nan)))
        x10 = np.linspace(0, 1, 11, endpoint=True)
        assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10))
        x4 = np.linspace(0, 1, 5, endpoint=True)
        assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4))
    def test_smirnovi(self):
        # round trip: smirnov(smirnovi(p)) == p
        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
        assert_(np.isnan(cephes.smirnovi(1,np.nan)))
    def test_smirnovci(self):
        # round trip through the complementary pair
        assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4)
        assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6)
        assert_(np.isnan(cephes._smirnovci(1,np.nan)))
    def test_spence(self):
        assert_equal(cephes.spence(1),0.0)
    def test_stdtr(self):
        assert_equal(cephes.stdtr(1,0),0.5)
        assert_almost_equal(cephes.stdtr(1,1), 0.75)
        assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
    def test_stdtridf(self):
        cephes.stdtridf(0.7,1)
    def test_stdtrit(self):
        cephes.stdtrit(1,0.7)
    def test_struve(self):
        assert_equal(cephes.struve(0,0),0.0)
    def test_tandg(self):
        assert_equal(cephes.tandg(45),1.0)
    def test_tklmbda(self):
        assert_almost_equal(cephes.tklmbda(1,1),1.0)
    def test_y0(self):
        cephes.y0(1)
    def test_y1(self):
        cephes.y1(1)
    def test_yn(self):
        cephes.yn(1,1)
    def test_yv(self):
        cephes.yv(1,1)
    def _check_yve(self):
        cephes.yve(1,1)
    def test_wofz(self):
        """Faddeeva function w(z) at scattered complex sample points.

        The inputs cover all four quadrants plus very small and very
        large magnitudes; each expected value in ``w`` corresponds
        positionally to the input in ``z``.
        """
        z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
             complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
             complex(-0.0000000234545,1.1234), complex(-3.,5.1),
             complex(-53,30.1), complex(0.0,0.12345),
             complex(11,1), complex(-22,-2), complex(9,-28),
             complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
             ]
        # High-precision reference values, one per entry of z above.
        w = [
            complex(-3.78270245518980507452677445620103199303131110e-7,
                    0.000903861276433172057331093754199933411710053155),
            complex(0.1764906227004816847297495349730234591778719532788,
                    -0.02146550539468457616788719893991501311573031095617),
            complex(0.2410250715772692146133539023007113781272362309451,
                    0.06087579663428089745895459735240964093522265589350),
            complex(0.30474420525691259245713884106959496013413834051768,
                    -0.20821893820283162728743734725471561394145872072738),
            complex(7.317131068972378096865595229600561710140617977e34,
                    8.321873499714402777186848353320412813066170427e34),
            complex(0.0615698507236323685519612934241429530190806818395,
                    -0.00676005783716575013073036218018565206070072304635),
            complex(0.3960793007699874918961319170187598400134746631,
                    -5.593152259116644920546186222529802777409274656e-9),
            complex(0.08217199226739447943295069917990417630675021771804,
                    -0.04701291087643609891018366143118110965272615832184),
            complex(0.00457246000350281640952328010227885008541748668738,
                    -0.00804900791411691821818731763401840373998654987934),
            complex(0.8746342859608052666092782112565360755791467973338452,
                    0.),
            complex(0.00468190164965444174367477874864366058339647648741,
                    0.0510735563901306197993676329845149741675029197050),
            complex(-0.0023193175200187620902125853834909543869428763219,
                    -0.025460054739731556004902057663500272721780776336),
            complex(9.11463368405637174660562096516414499772662584e304,
                    3.97101807145263333769664875189354358563218932e305),
            complex(-4.4927207857715598976165541011143706155432296e281,
                    -2.8019591213423077494444700357168707775769028e281),
            complex(2.820947917809305132678577516325951485807107151e-6,
                    2.820947917668257736791638444590253942253354058e-6),
            complex(2.82094791773878143474039725787438662716372268e-15,
                    2.82094791773878143474039725773333923127678361e-15)
            ]
        assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry:
    """Tests for the Airy functions Ai, Bi, their derivatives, and zeros."""

    def test_airy(self):
        # This tests the airy function to ensure 8 place accuracy in computation
        # airy(x) returns the 4-tuple (Ai, Ai', Bi, Bi').
        x = special.airy(.99)
        assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
        x = special.airy(.41)
        assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
        x = special.airy(-.36)
        assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)

    def test_airye(self):
        """airye is airy rescaled: Ai-terms by exp(2/3*z*sqrt(z)),
        Bi-terms by exp(-|Re(2/3*z*sqrt(z))|)."""
        a = special.airye(0.01)
        b = special.airy(0.01)
        b1 = [None]*4
        for n in range(2):
            b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
        for n in range(2,4):
            b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
        assert_array_almost_equal(a,b1,6)

    def test_bi_zeros(self):
        """bi_zeros(n) returns (zeros of Bi, zeros of Bi', Bi(Bi' zeros),
        Bi'(Bi zeros)) -- checked against tabulated values."""
        bi = special.bi_zeros(2)
        bia = (array([-1.17371322, -3.2710930]),
               array([-2.29443968, -4.07315509]),
               array([-0.45494438, 0.39652284]),
               array([0.60195789, -0.76031014]))
        assert_array_almost_equal(bi,bia,4)
        bi = special.bi_zeros(5)
        assert_array_almost_equal(bi[0],array([-1.173713222709127,
                                               -3.271093302836352,
                                               -4.830737841662016,
                                               -6.169852128310251,
                                               -7.376762079367764]),11)
        assert_array_almost_equal(bi[1],array([-2.294439682614122,
                                               -4.073155089071828,
                                               -5.512395729663599,
                                               -6.781294445990305,
                                               -7.940178689168587]),10)
        assert_array_almost_equal(bi[2],array([-0.454944383639657,
                                               0.396522836094465,
                                               -0.367969161486959,
                                               0.349499116831805,
                                               -0.336026240133662]),11)
        assert_array_almost_equal(bi[3],array([0.601957887976239,
                                               -0.760310141492801,
                                               0.836991012619261,
                                               -0.88947990142654,
                                               0.929983638568022]),10)

    def test_ai_zeros(self):
        """First zero of Ai and Ai' with associated function values."""
        ai = special.ai_zeros(1)
        assert_array_almost_equal(ai,(array([-2.33810741]),
                                      array([-1.01879297]),
                                      array([0.5357]),
                                      array([0.7012])),4)

    def test_ai_zeros_big(self):
        """Many Ai zeros: values match airy() and are actual zeros.

        The envelope |z|**(-1/4) normalizes Ai's decaying oscillation so an
        absolute tolerance is meaningful for all 50000 zeros.
        """
        z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
        ai_z, aip_z, _, _ = special.airy(z)
        ai_zp, aip_zp, _, _ = special.airy(zp)
        ai_envelope = 1/abs(z)**(1./4)
        aip_envelope = abs(zp)**(1./4)
        # Check values
        assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
        assert_allclose(aip_zx, aip_z, rtol=1e-10)
        # Check they are zeros
        assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
        assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
        # Check first zeros, DLMF 9.9.1
        assert_allclose(z[:6],
                        [-2.3381074105, -4.0879494441, -5.5205598281,
                         -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
        assert_allclose(zp[:6],
                        [-1.0187929716, -3.2481975822, -4.8200992112,
                         -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)

    def test_bi_zeros_big(self):
        """Many Bi zeros: same structure as test_ai_zeros_big."""
        z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
        _, _, bi_z, bip_z = special.airy(z)
        _, _, bi_zp, bip_zp = special.airy(zp)
        bi_envelope = 1/abs(z)**(1./4)
        bip_envelope = abs(zp)**(1./4)
        # Check values
        assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
        assert_allclose(bip_zx, bip_z, rtol=1e-10)
        # Check they are zeros
        assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
        assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
        # Check first zeros, DLMF 9.9.2
        assert_allclose(z[:6],
                        [-1.1737132227, -3.2710933028, -4.8307378417,
                         -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
        assert_allclose(zp[:6],
                        [-2.2944396826, -4.0731550891, -5.5123957297,
                         -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre:
    def test_assoc_laguerre(self):
        """assoc_laguerre(x, n, k) matches evaluating genlaguerre(n, k) at x."""
        poly = special.genlaguerre(11, 1)
        for x in (.2, 1):
            direct = special.assoc_laguerre(x, 11, 1)
            assert_array_almost_equal(direct, poly(x), 8)
class TestBesselpoly:
    def test_besselpoly(self):
        """Placeholder: no checks implemented for besselpoly yet."""
        pass
class TestKelvin:
    """Tests for the Kelvin functions ber/bei/ker/kei, their derivatives,
    and the zero-finding helpers.  Point values are spot checks (several
    are flagged in-line as possibly inexact); the zero tables come from
    Abramowitz & Stegun.
    """

    def test_bei(self):
        mbei = special.bei(2)
        assert_almost_equal(mbei, 0.9722916273066613,5)  # this may not be exact

    def test_beip(self):
        mbeip = special.beip(2)
        assert_almost_equal(mbeip,0.91701361338403631,5)  # this may not be exact

    def test_ber(self):
        mber = special.ber(2)
        assert_almost_equal(mber,0.75173418271380821,5)  # this may not be exact

    def test_berp(self):
        mberp = special.berp(2)
        assert_almost_equal(mberp,-0.49306712470943909,5)  # this may not be exact

    def test_bei_zeros(self):
        # Abramowitz & Stegun, Table 9.12
        bi = special.bei_zeros(5)
        assert_array_almost_equal(bi,array([5.02622,
                                            9.45541,
                                            13.89349,
                                            18.33398,
                                            22.77544]),4)

    def test_beip_zeros(self):
        bip = special.beip_zeros(5)
        assert_array_almost_equal(bip,array([3.772673304934953,
                                             8.280987849760042,
                                             12.742147523633703,
                                             17.193431752512542,
                                             21.641143941167325]),8)

    def test_ber_zeros(self):
        ber = special.ber_zeros(5)
        assert_array_almost_equal(ber,array([2.84892,
                                             7.23883,
                                             11.67396,
                                             16.11356,
                                             20.55463]),4)

    def test_berp_zeros(self):
        brp = special.berp_zeros(5)
        assert_array_almost_equal(brp,array([6.03871,
                                             10.51364,
                                             14.96844,
                                             19.41758,
                                             23.86430]),4)

    def test_kelvin(self):
        # kelvin(x) packs all four functions (and derivatives) as two
        # complex pairs: Be = ber + i*bei, Ke = ker + i*kei.
        mkelv = special.kelvin(2)
        assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
                                         special.ker(2) + special.kei(2)*1j,
                                         special.berp(2) + special.beip(2)*1j,
                                         special.kerp(2) + special.keip(2)*1j),8)

    def test_kei(self):
        mkei = special.kei(2)
        assert_almost_equal(mkei,-0.20240006776470432,5)

    def test_keip(self):
        mkeip = special.keip(2)
        assert_almost_equal(mkeip,0.21980790991960536,5)

    def test_ker(self):
        mker = special.ker(2)
        assert_almost_equal(mker,-0.041664513991509472,5)

    def test_kerp(self):
        mkerp = special.kerp(2)
        assert_almost_equal(mkerp,-0.10660096588105264,5)

    def test_kei_zeros(self):
        kei = special.kei_zeros(5)
        assert_array_almost_equal(kei,array([3.91467,
                                             8.34422,
                                             12.78256,
                                             17.22314,
                                             21.66464]),4)

    def test_keip_zeros(self):
        keip = special.keip_zeros(5)
        assert_array_almost_equal(keip,array([4.93181,
                                              9.40405,
                                              13.85827,
                                              18.30717,
                                              22.75379]),4)

    # numbers come from 9.9 of A&S pg. 381
    def test_kelvin_zeros(self):
        # kelvin_zeros returns the zeros of all eight functions at once;
        # each sub-array should match the corresponding *_zeros helper.
        tmp = special.kelvin_zeros(5)
        berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
        assert_array_almost_equal(berz,array([2.84892,
                                              7.23883,
                                              11.67396,
                                              16.11356,
                                              20.55463]),4)
        assert_array_almost_equal(beiz,array([5.02622,
                                              9.45541,
                                              13.89349,
                                              18.33398,
                                              22.77544]),4)
        assert_array_almost_equal(kerz,array([1.71854,
                                              6.12728,
                                              10.56294,
                                              15.00269,
                                              19.44382]),4)
        assert_array_almost_equal(keiz,array([3.91467,
                                              8.34422,
                                              12.78256,
                                              17.22314,
                                              21.66464]),4)
        assert_array_almost_equal(berpz,array([6.03871,
                                               10.51364,
                                               14.96844,
                                               19.41758,
                                               23.86430]),4)
        assert_array_almost_equal(beipz,array([3.77267,
                 # table from 1927 had 3.77320
                 #  but this is more accurate
                                               8.28099,
                                               12.74215,
                                               17.19343,
                                               21.64114]),4)
        assert_array_almost_equal(kerpz,array([2.66584,
                                               7.17212,
                                               11.63218,
                                               16.08312,
                                               20.53068]),4)
        assert_array_almost_equal(keipz,array([4.93181,
                                               9.40405,
                                               13.85827,
                                               18.30717,
                                               22.75379]),4)

    def test_ker_zeros(self):
        # NOTE(review): last entry is 19.44381 here but 19.44382 in
        # test_kelvin_zeros above; both pass at 4 decimals.
        ker = special.ker_zeros(5)
        assert_array_almost_equal(ker,array([1.71854,
                                             6.12728,
                                             10.56294,
                                             15.00269,
                                             19.44381]),4)

    def test_kerp_zeros(self):
        kerp = special.kerp_zeros(5)
        assert_array_almost_equal(kerp,array([2.66584,
                                              7.17212,
                                              11.63218,
                                              16.08312,
                                              20.53068]),4)
class TestBernoulli:
    def test_bernoulli(self):
        """First six Bernoulli numbers B_0..B_5, checked to 4 decimals."""
        expected = array([1.0000, -0.5000, 0.1667, 0.0000, -0.0333, 0.0000])
        assert_array_almost_equal(special.bernoulli(5), expected, 4)
class TestBeta:
    def test_beta(self):
        """beta(a, b) equals gamma(a)*gamma(b)/gamma(a+b)."""
        via_gamma = (special.gamma(2) * special.gamma(4)) / special.gamma(6)
        assert_almost_equal(special.beta(2, 4), via_gamma, 8)

    def test_betaln(self):
        """betaln is the natural log of |beta|."""
        expected = log(abs(special.beta(2, 4)))
        assert_almost_equal(special.betaln(2, 4), expected, 8)

    def test_betainc(self):
        """Regularized incomplete beta with a = b = 1 is the identity in x."""
        assert_almost_equal(special.betainc(1, 1, .2), 0.2, 8)

    def test_betaincinv(self):
        """betaincinv is the functional inverse of betainc."""
        inv = special.betaincinv(2, 4, .5)
        assert_almost_equal(special.betainc(2, 4, inv), .5, 5)
class TestCombinatorics:
    """Tests for comb() and perm(), including exact/approximate modes,
    repetition, big-integer results, and out-of-range (zero) behavior."""

    def test_comb(self):
        assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
        assert_almost_equal(special.comb(10, 3), 120.)
        assert_equal(special.comb(10, 3, exact=True), 120)
        assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
        # exact=True (int) and exact=False (float) must agree for all k.
        assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
                        special.comb(20, list(range(21))), atol=1e-15)
        # Arguments just past the native int range still work exactly.
        ii = np.iinfo(int).max + 1
        assert_equal(special.comb(ii, ii-1, exact=True), ii)
        expected = 100891344545564193334812497256
        assert_equal(special.comb(100, 50, exact=True), expected)

    @pytest.mark.parametrize("repetition", [True, False])
    @pytest.mark.parametrize("legacy", [True, False])
    @pytest.mark.parametrize("k", [3.5, 3])
    @pytest.mark.parametrize("N", [4.5, 4])
    def test_comb_legacy(self, N, k, legacy, repetition):
        # test is only relevant for exact=True
        if legacy and (N != int(N) or k != int(k)):
            # legacy mode deprecates non-integer input under exact=True
            with pytest.warns(
                DeprecationWarning,
                match=r"Non-integer arguments are currently being cast to",
            ):
                result = special.comb(N, k, exact=True, legacy=legacy,
                                      repetition=repetition)
        else:
            result = special.comb(N, k, exact=True, legacy=legacy,
                                  repetition=repetition)
        if legacy:
            # for exact=True and legacy=True, cast input arguments, else don't
            if repetition:
                # the casting in legacy mode happens AFTER transforming N & k,
                # so rounding can change (e.g. both floats, but sum to int);
                # hence we need to emulate the repetition-transformation here
                N, k = int(N + k - 1), int(k)
                repetition = False
            else:
                N, k = int(N), int(k)
        # expected result is the same as with exact=False
        expected = special.comb(N, k, legacy=legacy, repetition=repetition)
        assert_equal(result, expected)

    def test_comb_with_np_int64(self):
        # np.int64 arguments must behave the same as Python ints.
        n = 70
        k = 30
        np_n = np.int64(n)
        np_k = np.int64(k)
        assert_equal(special.comb(np_n, np_k, exact=True),
                     special.comb(n, k, exact=True))

    def test_comb_zeros(self):
        # Out-of-domain arguments (k > n, negatives) yield 0, not an error.
        assert_equal(special.comb(2, 3, exact=True), 0)
        assert_equal(special.comb(-1, 3, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=False), 0)
        assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 120.])

    def test_perm(self):
        assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
        assert_almost_equal(special.perm(10, 3), 720.)
        assert_equal(special.perm(10, 3, exact=True), 720)

    def test_perm_zeros(self):
        # Same out-of-domain conventions as comb.
        assert_equal(special.perm(2, 3, exact=True), 0)
        assert_equal(special.perm(-1, 3, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=False), 0)
        assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 720.])
class TestTrigonometric:
    """Tests for cbrt, cosm1 and the degree-argument trig helpers."""

    def test_cbrt(self):
        """Cube root of a perfect cube."""
        assert_approx_equal(special.cbrt(27), 27**(1.0/3.0))

    def test_cbrtmore(self):
        """Cube root of a non-cube value."""
        assert_almost_equal(special.cbrt(27.9), 27.9**(1.0/3.0), 8)

    def test_cosdg(self):
        """cosdg takes degrees; compare against cos in radians."""
        assert_almost_equal(special.cosdg(90), cos(pi/2.0), 8)

    def test_cosdgmore(self):
        assert_almost_equal(special.cosdg(30), cos(pi/6.0), 8)

    def test_cosm1(self):
        """cosm1(x) == cos(x) - 1 at several points."""
        points = (0, .3, pi/10)
        computed = tuple(special.cosm1(p) for p in points)
        reference = tuple(cos(p) - 1 for p in points)
        assert_array_almost_equal(computed, reference, 8)

    def test_cotdg(self):
        assert_almost_equal(special.cotdg(30), tan(pi/6.0)**(-1), 8)

    def test_cotdgmore(self):
        assert_almost_equal(special.cotdg(45), tan(pi/4.0)**(-1), 8)

    def test_specialpoints(self):
        """cotdg at multiples of 45 degrees, where the result is exact."""
        cases = [(45, 1.0), (-45, -1.0), (90, 0.0), (-90, 0.0),
                 (135, -1.0), (-135, 1.0), (225, 1.0), (-225, -1.0),
                 (270, 0.0), (-270, 0.0), (315, -1.0), (-315, 1.0),
                 (765, 1.0)]
        for degrees, expected in cases:
            assert_almost_equal(special.cotdg(degrees), expected, 14)

    def test_sinc(self):
        # the sinc implementation and more extensive sinc tests are in numpy
        assert_array_equal(special.sinc([0]), 1)
        assert_equal(special.sinc(0.0), 1.0)

    def test_sindg(self):
        assert_equal(special.sindg(90), 1.0)

    def test_sindgmore(self):
        for degrees, radians in ((30, pi/6.0), (45, pi/4.0)):
            assert_almost_equal(special.sindg(degrees), sin(radians), 8)
class TestTandg:
    """Tests for tandg (tangent of an argument given in degrees)."""

    def test_tandg(self):
        assert_almost_equal(special.tandg(30), tan(pi/6.0), 8)

    def test_tandgmore(self):
        for degrees, radians in ((45, pi/4.0), (60, pi/3.0)):
            assert_almost_equal(special.tandg(degrees), tan(radians), 8)

    def test_specialpoints(self):
        """Multiples of 45 degrees, where tandg should be exact."""
        cases = [(0, 0.0), (45, 1.0), (-45, -1.0), (135, -1.0),
                 (-135, 1.0), (180, 0.0), (-180, 0.0), (225, 1.0),
                 (-225, -1.0), (315, -1.0), (-315, 1.0)]
        for degrees, expected in cases:
            assert_almost_equal(special.tandg(degrees), expected, 14)
class TestEllip:
    """Tests for Jacobi elliptic functions and Legendre elliptic integrals.

    Fix: in ``test_ellipkinc_singular`` the tolerances previously read
    ``rtol=1e14`` -- a missing minus sign -- which made every one of those
    ``assert_allclose`` checks vacuously true.  They now use ``rtol=1e-10``.
    """

    def test_ellipj_nan(self):
        """Regression test for #912."""
        special.ellipj(0.5, np.nan)

    def test_ellipj(self):
        # For m=0: sn=sin(u), cn=cos(u), dn=1, ph=u.
        el = special.ellipj(0.2,0)
        rel = [sin(0.2),cos(0.2),1.0,0.20]
        assert_array_almost_equal(el,rel,13)

    def test_ellipk(self):
        """Complete integral K(m): known value plus edge cases of ellipkm1."""
        elk = special.ellipk(.2)
        assert_almost_equal(elk,1.659623598610528,11)
        assert_equal(special.ellipkm1(0.0), np.inf)
        assert_equal(special.ellipkm1(1.0), pi/2)
        assert_equal(special.ellipkm1(np.inf), 0.0)
        assert_equal(special.ellipkm1(np.nan), np.nan)
        assert_equal(special.ellipkm1(-1), np.nan)
        assert_allclose(special.ellipk(-10), 0.7908718902387385)

    def test_ellipkinc(self):
        """Incomplete integral F(phi, m): reduction to K, A&S values, edges."""
        elkinc = special.ellipkinc(pi/2,.2)
        elk = special.ellipk(0.2)
        assert_almost_equal(elkinc,elk,15)
        alpha = 20*pi/180
        phi = 45*pi/180
        m = sin(alpha)**2
        elkinc = special.ellipkinc(phi,m)
        assert_almost_equal(elkinc,0.79398143,8)
        # From pg. 614 of A & S
        assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
        assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
        assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipkinc(pi/2, 2), np.nan)
        assert_equal(special.ellipkinc(0, 0.5), 0.0)
        assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
        assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)

    def test_ellipkinc_2(self):
        # Regression test for gh-3550
        # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipkinc(phi, mvals)
        assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipkinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, np.full_like(f1, 5.1296650500976675), 2)

    def test_ellipkinc_singular(self):
        # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
        xlog = np.logspace(-300, -17, 25)
        xlin = np.linspace(1e-17, 0.1, 25)
        xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        # BUG FIX: these previously used rtol=1e14 (missing minus sign),
        # which accepted any finite result.  Compare against the closed
        # form arcsinh(tan(phi)) at a meaningful tolerance instead.
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-10)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-10)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-10)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-10)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-10)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-10)
        assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)

    def test_ellipe(self):
        """Complete integral E(m): known value plus edge cases."""
        ele = special.ellipe(.2)
        assert_almost_equal(ele,1.4890350580958529,8)
        assert_equal(special.ellipe(0.0), pi/2)
        assert_equal(special.ellipe(1.0), 1.0)
        assert_equal(special.ellipe(-np.inf), np.inf)
        assert_equal(special.ellipe(np.nan), np.nan)
        assert_equal(special.ellipe(2), np.nan)
        assert_allclose(special.ellipe(-10), 3.6391380384177689)

    def test_ellipeinc(self):
        """Incomplete integral E(phi, m): reduction to E, A&S values, edges."""
        eleinc = special.ellipeinc(pi/2,.2)
        ele = special.ellipe(0.2)
        assert_almost_equal(eleinc,ele,14)
        # pg 617 of A & S
        alpha, phi = 52*pi/180,35*pi/180
        m = sin(alpha)**2
        eleinc = special.ellipeinc(phi,m)
        assert_almost_equal(eleinc, 0.58823065, 8)
        assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
        assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
        assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipeinc(pi/2, 2), np.nan)
        assert_equal(special.ellipeinc(0, 0.5), 0.0)
        assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
        assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
        assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)

    def test_ellipeinc_2(self):
        # Regression test for gh-3550
        # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipeinc(phi, mvals)
        assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipeinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4)
class TestEllipCarlson(object):
    """Test for Carlson elliptic integrals ellipr[cdfgj].
    The special values used in these tests can be found in Sec. 3 of Carlson
    (1994), https://arxiv.org/abs/math/9409227
    """

    def test_elliprc(self):
        # Degenerate and limiting cases first, then Carlson's table values.
        assert_allclose(elliprc(1, 1), 1)
        assert elliprc(1, inf) == 0.0
        assert isnan(elliprc(1, 0))
        assert elliprc(1, complex(1, inf)) == 0.0
        args = array([[0.0, 0.25],
                      [2.25, 2.0],
                      [0.0, 1.0j],
                      [-1.0j, 1.0j],
                      [0.25, -2.0],
                      [1.0j, -1.0]])
        expected_results = array([np.pi,
                                  np.log(2.0),
                                  1.1107207345396 * (1.0-1.0j),
                                  1.2260849569072-0.34471136988768j,
                                  np.log(2.0) / 3.0,
                                  0.77778596920447+0.19832484993429j])
        for i, arr in enumerate(args):
            assert_allclose(elliprc(*arr), expected_results[i])

    def test_elliprd(self):
        assert_allclose(elliprd(1, 1, 1), 1)
        assert_allclose(elliprd(0, 2, 1) / 3.0, 0.59907011736779610371)
        assert elliprd(1, 1, inf) == 0.0
        assert np.isinf(elliprd(1, 1, 0))
        assert np.isinf(elliprd(1, 1, complex(0, 0)))
        assert np.isinf(elliprd(0, 1, complex(0, 0)))
        assert isnan(elliprd(1, 1, -np.finfo(np.double).tiny / 2.0))
        assert isnan(elliprd(1, 1, complex(-1, 0)))
        args = array([[0.0, 2.0, 1.0],
                      [2.0, 3.0, 4.0],
                      [1.0j, -1.0j, 2.0],
                      [0.0, 1.0j, -1.0j],
                      [0.0, -1.0+1.0j, 1.0j],
                      [-2.0-1.0j, -1.0j, -1.0+1.0j]])
        expected_results = array([1.7972103521034,
                                  0.16510527294261,
                                  0.65933854154220,
                                  1.2708196271910+2.7811120159521j,
                                  -1.8577235439239-0.96193450888839j,
                                  1.8249027393704-1.2218475784827j])
        for i, arr in enumerate(args):
            assert_allclose(elliprd(*arr), expected_results[i])

    def test_elliprf(self):
        assert_allclose(elliprf(1, 1, 1), 1)
        assert_allclose(elliprf(0, 1, 2), 1.31102877714605990523)
        assert elliprf(1, inf, 1) == 0.0
        assert np.isinf(elliprf(0, 1, 0))
        assert isnan(elliprf(1, 1, -1))
        assert elliprf(complex(inf), 0, 1) == 0.0
        assert isnan(elliprf(1, 1, complex(-inf, 1)))
        args = array([[1.0, 2.0, 0.0],
                      [1.0j, -1.0j, 0.0],
                      [0.5, 1.0, 0.0],
                      [-1.0+1.0j, 1.0j, 0.0],
                      [2.0, 3.0, 4.0],
                      [1.0j, -1.0j, 2.0],
                      [-1.0+1.0j, 1.0j, 1.0-1.0j]])
        expected_results = array([1.3110287771461,
                                  1.8540746773014,
                                  1.8540746773014,
                                  0.79612586584234-1.2138566698365j,
                                  0.58408284167715,
                                  1.0441445654064,
                                  0.93912050218619-0.53296252018635j])
        for i, arr in enumerate(args):
            assert_allclose(elliprf(*arr), expected_results[i])

    def test_elliprg(self):
        assert_allclose(elliprg(1, 1, 1), 1)
        assert_allclose(elliprg(0, 0, 1), 0.5)
        assert_allclose(elliprg(0, 0, 0), 0)
        assert np.isinf(elliprg(1, inf, 1))
        assert np.isinf(elliprg(complex(inf), 1, 1))
        args = array([[0.0, 16.0, 16.0],
                      [2.0, 3.0, 4.0],
                      [0.0, 1.0j, -1.0j],
                      [-1.0+1.0j, 1.0j, 0.0],
                      [-1.0j, -1.0+1.0j, 1.0j],
                      [0.0, 0.0796, 4.0]])
        expected_results = array([np.pi,
                                  1.7255030280692,
                                  0.42360654239699,
                                  0.44660591677018+0.70768352357515j,
                                  0.36023392184473+0.40348623401722j,
                                  1.0284758090288])
        for i, arr in enumerate(args):
            assert_allclose(elliprg(*arr), expected_results[i])

    def test_elliprj(self):
        assert_allclose(elliprj(1, 1, 1, 1), 1)
        assert elliprj(1, 1, inf, 1) == 0.0
        assert isnan(elliprj(1, 0, 0, 0))
        assert isnan(elliprj(-1, 1, 1, 1))
        assert elliprj(1, 1, 1, inf) == 0.0
        args = array([[0.0, 1.0, 2.0, 3.0],
                      [2.0, 3.0, 4.0, 5.0],
                      [2.0, 3.0, 4.0, -1.0+1.0j],
                      [1.0j, -1.0j, 0.0, 2.0],
                      [-1.0+1.0j, -1.0-1.0j, 1.0, 2.0],
                      [1.0j, -1.0j, 0.0, 1.0-1.0j],
                      [-1.0+1.0j, -1.0-1.0j, 1.0, -3.0+1.0j],
                      [2.0, 3.0, 4.0, -0.5],    # Cauchy principal value
                      [2.0, 3.0, 4.0, -5.0]])   # Cauchy principal value
        expected_results = array([0.77688623778582,
                                  0.14297579667157,
                                  0.13613945827771-0.38207561624427j,
                                  1.6490011662711,
                                  0.94148358841220,
                                  1.8260115229009+1.2290661908643j,
                                  -0.61127970812028-1.0684038390007j,
                                  0.24723819703052,    # Cauchy principal value
                                  -0.12711230042964])  # Cauchy principal value
        for i, arr in enumerate(args):
            assert_allclose(elliprj(*arr), expected_results[i])

    @pytest.mark.xfail(reason="Insufficient accuracy on 32-bit")
    def test_elliprj_hard(self):
        # Arguments spanning ~40 orders of magnitude stress the algorithm.
        assert_allclose(elliprj(6.483625725195452e-08,
                                1.1649136528196886e-27,
                                3.6767340167168e+13,
                                0.493704617023468),
                        8.63426920644241857617477551054e-6,
                        rtol=5e-15, atol=1e-20)
        assert_allclose(elliprj(14.375105857849121,
                                9.993988969725365e-11,
                                1.72844262269944e-26,
                                5.898871222598245e-06),
                        829774.1424801627252574054378691828,
                        rtol=5e-15, atol=1e-20)
class TestEllipLegendreCarlsonIdentities(object):
    """Test identities expressing the Legendre elliptic integrals in terms
    of Carlson's symmetric integrals. These identities can be found
    in the DLMF https://dlmf.nist.gov/19.25#i .

    Fix: ``finfo(float_)`` replaced by ``finfo(float)`` -- the ``np.float_``
    alias is deprecated and removed in NumPy 2.0; the builtin ``float``
    maps to the same float64 dtype.
    """

    def setup_class(self):
        # Parameters m from just below 1 down to the most negative double.
        self.m_n1_1 = np.arange(-1., 1., 0.01)
        # For double, this is -(2**1024)
        self.max_neg = finfo(float).min
        # Lots of very negative numbers
        self.very_neg_m = -1. * 2.**arange(-1 +
                                           np.log2(-self.max_neg), 0.,
                                           -1.)
        self.ms_up_to_1 = np.concatenate(([self.max_neg],
                                          self.very_neg_m,
                                          self.m_n1_1))

    def test_k(self):
        """Test identity:
        K(m) = R_F(0, 1-m, 1)
        """
        m = self.ms_up_to_1
        assert_allclose(ellipk(m), elliprf(0., 1.-m, 1.))

    def test_km1(self):
        """Test identity:
        K(m) = R_F(0, 1-m, 1)
        But with the ellipkm1 function
        """
        # For double, this is 2**-1022
        tiny = finfo(float).tiny
        # All these small powers of 2, up to 2**-1
        m1 = tiny * 2.**arange(0., -np.log2(tiny))
        assert_allclose(ellipkm1(m1), elliprf(0., m1, 1.))

    def test_e(self):
        """Test identity:
        E(m) = 2*R_G(0, 1-k^2, 1)
        """
        m = self.ms_up_to_1
        assert_allclose(ellipe(m), 2.*elliprg(0., 1.-m, 1.))
class TestErf:
    """Tests for erf and its relatives (erfc, erfcx, erfi, dawsn, wofz):
    point values, zeros, mutual consistency, and NaN/inf handling."""

    def test_erf(self):
        er = special.erf(.25)
        assert_almost_equal(er,0.2763263902,8)

    def test_erf_zeros(self):
        # First five complex zeros of erf in the first quadrant.
        erz = special.erf_zeros(5)
        erzr = array([1.45061616+1.88094300j,
                      2.24465928+2.61657514j,
                      2.83974105+3.17562810j,
                      3.33546074+3.64617438j,
                      3.76900557+4.06069723j])
        assert_array_almost_equal(erz,erzr,4)

    def _check_variant_func(self, func, other_func, rtol, atol=0):
        """Check func against an equivalent formula other_func on random
        real and complex points (heavy-tailed Pareto draws, both signs),
        discarding points where the reference overflows to non-finite.
        """
        np.random.seed(1234)
        n = 10000
        x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        z = x + 1j*y
        with np.errstate(all='ignore'):
            w = other_func(z)
            w_real = other_func(x).real
            mask = np.isfinite(w)
            w = w[mask]
            z = z[mask]
            mask = np.isfinite(w_real)
            w_real = w_real[mask]
            x = x[mask]
        # test both real and complex variants
        assert_func_equal(func, w, z, rtol=rtol, atol=atol)
        assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)

    def test_erfc_consistent(self):
        self._check_variant_func(
            cephes.erfc,
            lambda z: 1 - cephes.erf(z),
            rtol=1e-12,
            atol=1e-14  # <- the test function loses precision
        )

    def test_erfcx_consistent(self):
        self._check_variant_func(
            cephes.erfcx,
            lambda z: np.exp(z*z) * cephes.erfc(z),
            rtol=1e-12
        )

    def test_erfi_consistent(self):
        self._check_variant_func(
            cephes.erfi,
            lambda z: -1j * cephes.erf(1j*z),
            rtol=1e-12
        )

    def test_dawsn_consistent(self):
        self._check_variant_func(
            cephes.dawsn,
            lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
            rtol=1e-12
        )

    def test_erf_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -1, 1]
        assert_allclose(special.erf(vals), expected, rtol=1e-15)

    def test_erfc_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, 2, 0]
        assert_allclose(special.erfc(vals), expected, rtol=1e-15)

    def test_erfcx_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, np.inf, 0]
        assert_allclose(special.erfcx(vals), expected, rtol=1e-15)

    def test_erfi_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -np.inf, np.inf]
        assert_allclose(special.erfi(vals), expected, rtol=1e-15)

    def test_dawsn_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -0.0, 0.0]
        assert_allclose(special.dawsn(vals), expected, rtol=1e-15)

    def test_wofz_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
        assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler:
    def test_euler(self):
        """Euler numbers: small cases exactly, then E_0..E_24 vs MathWorld."""
        assert_allclose(special.euler(0), [1], rtol=1e-15)
        assert_allclose(special.euler(1), [1, 0], rtol=1e-15)
        assert_allclose(special.euler(2), [1, 0, -1], rtol=1e-15)
        eu24 = special.euler(24)
        # |E_{2k}| for k = 0..12 (MathWorld); odd-index Euler numbers are 0
        # and the even ones alternate in sign.
        mathworld = [1,1,5,61,1385,50521,2702765,199360981,
                     19391512145,2404879675441,
                     370371188237525,69348874393137901,
                     15514534163557086905]
        correct = zeros((25,),'d')
        for k, magnitude in enumerate(mathworld):
            sign = -1.0 if k % 2 else 1.0
            correct[2*k] = sign * float(magnitude)
        # Relative error, with the 0/0 entries at odd indices mapped to 0.
        with np.errstate(all='ignore'):
            err = nan_to_num((eu24-correct)/correct)
        errmax = max(err)
        assert_almost_equal(errmax, 0.0, 14)
class TestExp:
    """Tests for exp2, exp10 and expm1."""

    def test_exp2(self):
        """exp2 at an integer argument is exact."""
        assert_equal(special.exp2(2), 2**2)

    def test_exp2more(self):
        assert_almost_equal(special.exp2(2.5), 2**(2.5), 8)

    def test_exp10(self):
        assert_approx_equal(special.exp10(2), 10**2)

    def test_exp10more(self):
        assert_almost_equal(special.exp10(2.5), 10**(2.5), 8)

    def test_expm1(self):
        """expm1(x) == exp(x) - 1 at integer points."""
        for x in (2, 3, 4):
            assert_almost_equal(special.expm1(x), exp(x) - 1, 8)

    def test_expm1more(self):
        for x in (2, 2.1, 2.2):
            assert_almost_equal(special.expm1(x), exp(x) - 1, 8)
class TestFactorialFunctions:
    """Tests for factorial, factorial2 and factorialk: float vs exact
    integer modes, array handling, negative/NaN inputs, and return types."""

    def test_factorial(self):
        # Some known values, float math
        assert_array_almost_equal(special.factorial(0), 1)
        assert_array_almost_equal(special.factorial(1), 1)
        assert_array_almost_equal(special.factorial(2), 2)
        assert_array_almost_equal([6., 24., 120.],
                                  special.factorial([3, 4, 5], exact=False))
        assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
                                  [[120, 6], [24, 6]])
        # Some known values, integer math
        assert_equal(special.factorial(0, exact=True), 1)
        assert_equal(special.factorial(1, exact=True), 1)
        assert_equal(special.factorial(2, exact=True), 2)
        assert_equal(special.factorial(5, exact=True), 120)
        assert_equal(special.factorial(15, exact=True), 1307674368000)
        # ndarray shape is maintained
        assert_equal(special.factorial([7, 4, 15, 10], exact=True),
                     [5040, 24, 1307674368000, 3628800])
        assert_equal(special.factorial([[5, 3], [4, 3]], True),
                     [[120, 6], [24, 6]])
        # object arrays
        assert_equal(special.factorial(np.arange(-3, 22), True),
                     special.factorial(np.arange(-3, 22), False))
        # int64 array
        assert_equal(special.factorial(np.arange(-3, 15), True),
                     special.factorial(np.arange(-3, 15), False))
        # int32 array
        assert_equal(special.factorial(np.arange(-3, 5), True),
                     special.factorial(np.arange(-3, 5), False))
        # Consistent output for n < 0
        for exact in (True, False):
            assert_array_equal(0, special.factorial(-3, exact))
            assert_array_equal([1, 2, 0, 0],
                               special.factorial([1, 2, -5, -4], exact))
        for n in range(0, 22):
            # Compare all with math.factorial
            correct = math.factorial(n)
            assert_array_equal(correct, special.factorial(n, True))
            assert_array_equal(correct, special.factorial([n], True)[0])
            assert_allclose(float(correct), special.factorial(n, False))
            assert_allclose(float(correct), special.factorial([n], False)[0])
            # Compare exact=True vs False, scalar vs array
            assert_array_equal(special.factorial(n, True),
                               special.factorial(n, False))
            assert_array_equal(special.factorial([n], True),
                               special.factorial([n], False))

    @pytest.mark.parametrize('x, exact', [
        (1, True),
        (1, False),
        (np.array(1), True),
        (np.array(1), False),
    ])
    def test_factorial_0d_return_type(self, x, exact):
        # 0-d inputs (scalar or 0-d array) must come back as scalars.
        assert np.isscalar(special.factorial(x, exact=exact))

    def test_factorial2(self):
        assert_array_almost_equal([105., 384., 945.],
                                  special.factorial2([7, 8, 9], exact=False))
        assert_equal(special.factorial2(7, exact=True), 105)

    def test_factorialk(self):
        # factorialk(n, 1) is the ordinary factorial; k=3 is the triple one.
        assert_equal(special.factorialk(5, 1, exact=True), 120)
        assert_equal(special.factorialk(5, 3, exact=True), 10)

    @pytest.mark.parametrize('x, exact', [
        (np.nan, True),
        (np.nan, False),
        (np.array([np.nan]), True),
        (np.array([np.nan]), False),
    ])
    def test_nan_inputs(self, x, exact):
        result = special.factorial(x, exact=exact)
        assert_(np.isnan(result))

    # GH-13122: special.factorial() argument should be an array of integers.
    # On Python 3.10, math.factorial() reject float.
    # On Python 3.9, a DeprecationWarning is emitted.
    # A numpy array casts all integers to float if the array contains a
    # single NaN.
    @pytest.mark.skipif(sys.version_info >= (3, 10),
                        reason="Python 3.10+ math.factorial() requires int")
    def test_mixed_nan_inputs(self):
        x = np.array([np.nan, 1, 2, 3, np.nan])
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Using factorial\\(\\) with floats is deprecated")
            result = special.factorial(x, exact=True)
            assert_equal(np.array([np.nan, 1, 2, 6, np.nan]), result)
            result = special.factorial(x, exact=False)
            assert_equal(np.array([np.nan, 1, 2, 6, np.nan]), result)
class TestFresnel:
    """Tests for the Fresnel integrals S(z), C(z) and their zeros."""

    @pytest.mark.parametrize("z, s, c", [
        # some positive value
        (.5, 0.064732432859999287, 0.49234422587144644),
        (.5 + .0j, 0.064732432859999287, 0.49234422587144644),
        # negative half annulus
        # https://github.com/scipy/scipy/issues/12309
        # Reference values can be reproduced with
        # https://www.wolframalpha.com/input/?i=FresnelS%5B-2.0+%2B+0.1i%5D
        # https://www.wolframalpha.com/input/?i=FresnelC%5B-2.0+%2B+0.1i%5D
        (
            -2.0 + 0.1j,
            -0.3109538687728942-0.0005870728836383176j,
            -0.4879956866358554+0.10670801832903172j
        ),
        (
            -0.1 - 1.5j,
            -0.03918309471866977+0.7197508454568574j,
            0.09605692502968956-0.43625191013617465j
        ),
        # a different algorithm kicks in for "large" values, i.e., |z| >= 4.5,
        # make sure to test both float and complex values; a different
        # algorithm is used
        (6.0, 0.44696076, 0.49953147),
        (6.0 + 0.0j, 0.44696076, 0.49953147),
        (6.0j, -0.44696076j, 0.49953147j),
        (-6.0 + 0.0j, -0.44696076, -0.49953147),
        (-6.0j, 0.44696076j, -0.49953147j),
        # inf
        (np.inf, 0.5, 0.5),
        (-np.inf, -0.5, -0.5),
    ])
    def test_fresnel_values(self, z, s, c):
        # fresnel returns the pair (S(z), C(z)); compare both components
        # against the reference values to 8 decimal places.
        got_s, got_c = special.fresnel(z)
        assert_array_almost_equal(array([got_s, got_c]), array([s, c]), 8)

    # values from pg 329 Table 7.11 of A & S
    # slightly corrected in 4th decimal place
    def test_fresnel_zeros(self):
        szo, czo = special.fresnel_zeros(5)
        s_ref = array([2.0093+0.2885j,
                       2.8335+0.2443j,
                       3.4675+0.2185j,
                       4.0026+0.2009j,
                       4.4742+0.1877j])
        c_ref = array([1.7437+0.3057j,
                       2.6515+0.2529j,
                       3.3204+0.2240j,
                       3.8757+0.2047j,
                       4.3611+0.1907j])
        assert_array_almost_equal(szo, s_ref, 3)
        assert_array_almost_equal(czo, c_ref, 3)
        # The computed zeros really must annihilate S and C respectively.
        assert_array_almost_equal(special.fresnel(szo)[0], 0, 14)
        assert_array_almost_equal(special.fresnel(czo)[1], 0, 14)

    def test_fresnelc_zeros(self):
        # fresnelc_zeros must agree with the C-component of fresnel_zeros.
        czo = special.fresnel_zeros(6)[1]
        assert_array_almost_equal(special.fresnelc_zeros(6), czo, 12)

    def test_fresnels_zeros(self):
        # fresnels_zeros must agree with the S-component of fresnel_zeros.
        szo = special.fresnel_zeros(5)[0]
        assert_array_almost_equal(special.fresnels_zeros(5), szo, 12)
class TestGamma:
    """Tests for the gamma function and its relatives (gammaln, rgamma,
    incomplete gamma inverses)."""

    def test_gamma(self):
        # gamma(5) = 4! = 24 exactly.
        assert_equal(special.gamma(5), 24.0)

    def test_gammaln(self):
        # gammaln must agree with log(gamma) where gamma does not overflow.
        expected = log(special.gamma(3))
        assert_almost_equal(special.gammaln(3), expected, 8)

    def test_gammainccinv(self):
        # The inverses of the upper and lower regularized incomplete gamma
        # functions coincide at the symmetric point p = q = 0.5.
        assert_almost_equal(special.gammainccinv(.5, .5),
                            special.gammaincinv(.5, .5), 8)

    @with_special_errors
    def test_gammaincinv(self):
        # Round trip: gammainc(a, gammaincinv(a, p)) should recover p.
        p = special.gammainc(.4, special.gammaincinv(.4, .4))
        assert_almost_equal(p, 0.4, 1)
        y = special.gammainc(10, 0.05)
        x = special.gammaincinv(10, 2.5715803516000736e-20)
        assert_almost_equal(0.05, x, decimal=10)
        assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
        x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
        assert_almost_equal(11.0, x, decimal=10)

    @with_special_errors
    def test_975(self):
        # Regression test for ticket #975 -- switch point in algorithm.
        # Check that things work OK at the point, at the immediately
        # neighbouring floats, and a bit further away.
        points = [0.25,
                  np.nextafter(0.25, 0), 0.25 - 1e-12,
                  np.nextafter(0.25, 1), 0.25 + 1e-12]
        for p in points:
            inv = special.gammaincinv(.4, p)
            assert_allclose(special.gammainc(0.4, inv), p, rtol=1e-12)

    def test_rgamma(self):
        # rgamma is the reciprocal of gamma.
        assert_almost_equal(special.rgamma(8), 1/special.gamma(8), 8)

    def test_infinity(self):
        # gamma has poles at non-positive integers; rgamma is zero there.
        assert_(np.isinf(special.gamma(-1)))
        assert_equal(special.rgamma(-1), 0)
class TestHankel:
    """Tests for Hankel functions H1, H2 and their exponentially scaled
    variants hankel1e / hankel2e."""

    def test_negv1(self):
        # H1_{-n}(z) = (-1)^n * H1_n(z); for odd n = 3 this is a sign flip.
        assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)

    def test_hankel1(self):
        # Definition: H1_v(z) = J_v(z) + i*Y_v(z).
        hank1 = special.hankel1(1,.1)
        hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
        assert_almost_equal(hank1,hankrl,8)

    def test_negv1e(self):
        assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)

    def test_hankel1e(self):
        # Scaling: hankel1e(v, z) = hankel1(v, z) * exp(-1j*z).
        hank1e = special.hankel1e(1,.1)
        hankrle = special.hankel1(1,.1)*exp(-.1j)
        assert_almost_equal(hank1e,hankrle,8)

    def test_negv2(self):
        assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)

    def test_hankel2(self):
        # Definition: H2_v(z) = J_v(z) - i*Y_v(z).
        hank2 = special.hankel2(1,.1)
        hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
        assert_almost_equal(hank2,hankrl2,8)

    def test_neg2e(self):
        assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)

    def test_hankl2e(self):
        # BUG FIX: the reference value used to be special.hankel2e(1,.1)
        # itself, which made the assertion a tautology.  The scaled function
        # is defined as hankel2e(v, z) = hankel2(v, z) * exp(1j*z), so
        # compare against the unscaled function with the scaling applied
        # (mirroring test_hankel1e above).
        hank2e = special.hankel2e(1,.1)
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper:
    """Tests for hypergeometric functions (0F1, 1F1, 2F1, Tricomi's U) and
    the Hankel-function derivatives h1vp / h2vp."""

    def test_h1vp(self):
        # d/dz H1_v(z) = J'_v(z) + i*Y'_v(z)
        h1 = special.h1vp(1,.1)
        h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
        assert_almost_equal(h1,h1real,8)

    def test_h2vp(self):
        # d/dz H2_v(z) = J'_v(z) - i*Y'_v(z)
        h2 = special.h2vp(1,.1)
        h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
        assert_almost_equal(h2,h2real,8)

    def test_hyp0f1(self):
        # scalar input
        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)

        # float input, expected values match mpmath
        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
                             1.37789689539747, 1.60373685288480])
        assert_allclose(x, expected, rtol=1e-12)

        # complex input
        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)

        # test broadcasting
        x1 = [0.5, 1.5, 2.5]
        x2 = [0, 1, 0.5]
        x = special.hyp0f1(x1, x2)
        expected = [1.0, 1.8134302039235093, 1.21482702689997]
        assert_allclose(x, expected, rtol=1e-12)
        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
        # incompatible shapes must raise
        assert_raises(ValueError, special.hyp0f1,
                      np.row_stack([x1] * 3), [0, 1])

    def test_hyp0f1_gh5764(self):
        # Just checks the point that failed; there's a more systematic
        # test in test_mpmath
        res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
        # The expected value was generated using mpmath
        assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)

    def test_hyp1f1(self):
        hyp1 = special.hyp1f1(.1,.1,.3)
        assert_almost_equal(hyp1, 1.3498588075760032,7)

        # test contributed by Moritz Deger (2008-05-29)
        # https://github.com/scipy/scipy/issues/1186 (Trac #659)

        # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
        # produced with test_hyp1f1.nb
        ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
                          [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
                          [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
                          [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
                          [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
                          [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
                          [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
                          [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
                          [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
                          [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
                          [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
                          [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
                          [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
                          [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
                          [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
                          [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
                          [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
                          [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
                          [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
                          [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
                          [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
                          [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
                          [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
                          [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
                          [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
                          [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
                          [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
                          [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
                          [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
                          [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
                          [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
                          [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
                          [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
                          [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
                          [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
                          [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
                          [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
                          [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
                          [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
                          [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
                          [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
                          [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
                          [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
                          [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
                          [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
                          [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
                          [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
                          [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
                          [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
                          [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
                          [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
                          [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
                          [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
                          [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
                          [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
                          [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
                          [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
                          [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
                          [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
                          [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
                          [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
                          [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
                          [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
                          [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
                          [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
                          [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
                          [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
                          [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
                          [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
                          [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
                          [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
                          [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
                          [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
                          [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
                          [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
                          [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
                          [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
                          [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
                          [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
                          [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
                          [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
                          [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
                          [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
                          [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
                          [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
                          [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
                          [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
                          [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
                          [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
                          [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
                          [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
                          [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
                          [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
                          [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
                          [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
                          [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
                          [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
                          [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
                          [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
                          [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
        # Require 4 significant digits of relative accuracy against the
        # Mathematica reference for every (a, b, x) triple.
        for a,b,c,expected in ref_data:
            result = special.hyp1f1(a,b,c)
            assert_(abs(expected - result)/expected < 1e-4)

    def test_hyp1f1_gh2957(self):
        # Continuity across the internal switch point near x ~ -709.78.
        hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
        hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
        assert_almost_equal(hyp1, hyp2, 12)

    def test_hyp1f1_gh2282(self):
        hyp = special.hyp1f1(0.5, 1.5, -1000)
        assert_almost_equal(hyp, 0.028024956081989643, 12)

    def test_hyp2f1(self):
        # a collection of special cases taken from AMS 55
        values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
                  [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
                  [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
                  [3, 3.5, 1.5, 0.2**2,
                   0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
                  [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
                  [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
                  [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
                   special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
                  [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
                   special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
                  [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
                   special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
                  # and some others
                  # ticket #424
                  [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
                  # negative integer a or b, with c-a-b integer and x > 0.9
                  [-2,3,1,0.95,0.715],
                  [2,-3,1,0.95,-0.007],
                  [-6,3,1,0.95,0.0000810625],
                  [2,-5,1,0.95,-0.000029375],
                  # huge negative integers
                  (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
                  (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
                  ]
        for i, (a, b, c, x, v) in enumerate(values):
            cv = special.hyp2f1(a, b, c, x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_hyperu(self):
        # Spot value, then check Tricomi's U against its expression in
        # terms of 1F1 (Kummer's connection formula) on a small array.
        val1 = special.hyperu(1,0.1,100)
        assert_almost_equal(val1,0.0098153,7)
        a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
        a,b = asarray(a), asarray(b)
        z = 0.5
        hypu = special.hyperu(a,b,z)
        hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
                               (special.gamma(1+a-b)*special.gamma(b)) -
                               z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
                               / (special.gamma(a)*special.gamma(2-b)))
        assert_array_almost_equal(hypu,hprl,12)

    def test_hyperu_gh2287(self):
        assert_almost_equal(special.hyperu(1, 1.5, 20.2),
                            0.048360918656699191, 12)
class TestBessel:
    """Tests for Bessel functions J, Y, I, K, their zeros, derivatives and
    exponentially scaled variants, including cross-checks between the
    Cephes (real-argument) and AMOS (complex-argument) implementations."""

    def test_itj0y0(self):
        # Integrals of J0 and Y0 from 0 to x.
        it0 = array(special.itj0y0(.2))
        assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)

    def test_it2j0y0(self):
        it2 = array(special.it2j0y0(.2))
        assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)

    def test_negv_iv(self):
        # I_{-n} == I_n for integer order.
        assert_equal(special.iv(3,2), special.iv(-3,2))

    def test_j0(self):
        oz = special.j0(.1)
        ozr = special.jn(0,.1)
        assert_almost_equal(oz,ozr,8)

    def test_j1(self):
        o1 = special.j1(.1)
        o1r = special.jn(1,.1)
        assert_almost_equal(o1,o1r,8)

    def test_jn(self):
        jnnr = special.jn(1,.2)
        assert_almost_equal(jnnr,0.099500832639235995,8)

    def test_negv_jv(self):
        # J_{-n}(z) = (-1)^n J_n(z); n = 3 gives a sign flip.
        assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)

    def test_jv(self):
        values = [[0, 0.1, 0.99750156206604002],
                  [2./3, 1e-8, 0.3239028506761532e-5],
                  [2./3, 1e-10, 0.1503423854873779e-6],
                  [3.1, 1e-10, 0.1711956265409013e-32],
                  [2./3, 4.0, -0.2325440850267039],
                  ]
        for i, (v, x, y) in enumerate(values):
            yc = special.jv(v, x)
            assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)

    def test_negv_jve(self):
        assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)

    def test_jve(self):
        # jve(v, z) = jv(v, z) * exp(-|Im(z)|); real z is unscaled.
        jvexp = special.jve(1,.2)
        assert_almost_equal(jvexp,0.099500832639235995,8)
        jvexp1 = special.jve(1,.2+1j)
        z = .2+1j
        jvexpr = special.jv(1,z)*exp(-abs(z.imag))
        assert_almost_equal(jvexp1,jvexpr,8)

    def test_jn_zeros(self):
        jn0 = special.jn_zeros(0,5)
        jn1 = special.jn_zeros(1,5)
        assert_array_almost_equal(jn0,array([2.4048255577,
                                             5.5200781103,
                                             8.6537279129,
                                             11.7915344391,
                                             14.9309177086]),4)
        assert_array_almost_equal(jn1,array([3.83171,
                                             7.01559,
                                             10.17347,
                                             13.32369,
                                             16.47063]),4)

        jn102 = special.jn_zeros(102,5)
        assert_allclose(jn102, array([110.89174935992040343,
                                      117.83464175788308398,
                                      123.70194191713507279,
                                      129.02417238949092824,
                                      134.00114761868422559]), rtol=1e-13)

        jn301 = special.jn_zeros(301,5)
        assert_allclose(jn301, array([313.59097866698830153,
                                      323.21549776096288280,
                                      331.22338738656748796,
                                      338.39676338872084500,
                                      345.03284233056064157]), rtol=1e-13)

    def test_jn_zeros_slow(self):
        # High zero indices and very high orders.
        jn0 = special.jn_zeros(0, 300)
        assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13)
        assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13)
        assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13)

        jn10 = special.jn_zeros(10, 300)
        assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13)
        assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13)
        assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13)

        jn3010 = special.jn_zeros(3010,5)
        assert_allclose(jn3010, array([3036.86590780927,
                                       3057.06598526482,
                                       3073.66360690272,
                                       3088.37736494778,
                                       3101.86438139042]), rtol=1e-8)

    def test_jnjnp_zeros(self):
        jn = special.jn

        def jnp(n, x):
            # Derivative of J_n via the standard recurrence.
            return (jn(n-1,x) - jn(n+1,x))/2

        for nt in range(1, 30):
            z, n, m, t = special.jnjnp_zeros(nt)
            for zz, nn, tt in zip(z, n, t):
                if tt == 0:
                    assert_allclose(jn(nn, zz), 0, atol=1e-6)
                elif tt == 1:
                    assert_allclose(jnp(nn, zz), 0, atol=1e-6)
                else:
                    raise AssertionError("Invalid t return for nt=%d" % nt)

    def test_jnp_zeros(self):
        jnp = special.jnp_zeros(1,5)
        assert_array_almost_equal(jnp, array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),4)
        jnp = special.jnp_zeros(443,5)
        assert_allclose(special.jvp(443, jnp), 0, atol=1e-15)

    def test_jnyn_zeros(self):
        jnz = special.jnyn_zeros(1,5)
        assert_array_almost_equal(jnz,(array([3.83171,
                                              7.01559,
                                              10.17347,
                                              13.32369,
                                              16.47063]),
                                       array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),
                                       array([2.19714,
                                              5.42968,
                                              8.59601,
                                              11.74915,
                                              14.89744]),
                                       array([3.68302,
                                              6.94150,
                                              10.12340,
                                              13.28576,
                                              16.44006])),5)

    def test_jvp(self):
        # J'_2(2) via the recurrence (J_1 - J_3)/2.
        jvprim = special.jvp(2,2)
        jv0 = (special.jv(1,2)-special.jv(3,2))/2
        assert_almost_equal(jvprim,jv0,10)

    def test_k0(self):
        ozk = special.k0(.1)
        ozkr = special.kv(0,.1)
        assert_almost_equal(ozk,ozkr,8)

    def test_k0e(self):
        ozke = special.k0e(.1)
        ozker = special.kve(0,.1)
        assert_almost_equal(ozke,ozker,8)

    def test_k1(self):
        o1k = special.k1(.1)
        o1kr = special.kv(1,.1)
        assert_almost_equal(o1k,o1kr,8)

    def test_k1e(self):
        o1ke = special.k1e(.1)
        o1ker = special.kve(1,.1)
        assert_almost_equal(o1ke,o1ker,8)

    def test_jacobi(self):
        # Compare low-order Jacobi polynomial coefficients against the
        # explicit closed forms, for random parameters a, b > -1.
        a = 5*np.random.random() - 1
        b = 5*np.random.random() - 1
        P0 = special.jacobi(0,a,b)
        P1 = special.jacobi(1,a,b)
        P2 = special.jacobi(2,a,b)
        P3 = special.jacobi(3,a,b)

        assert_array_almost_equal(P0.c,[1],13)
        assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
        cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
        p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
        assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
        cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
              12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
        p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
        assert_array_almost_equal(P3.c,array(p3c)/48.0,13)

    def test_kn(self):
        kn1 = special.kn(0,.2)
        assert_almost_equal(kn1,1.7527038555281462,8)

    def test_negv_kv(self):
        # K_{-v} == K_v for all orders.
        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))

    def test_kv0(self):
        kv0 = special.kv(0,.2)
        assert_almost_equal(kv0, 1.7527038555281462, 10)

    def test_kv1(self):
        kv1 = special.kv(1,0.2)
        assert_almost_equal(kv1, 4.775972543220472, 10)

    def test_kv2(self):
        kv2 = special.kv(2,0.2)
        assert_almost_equal(kv2, 49.51242928773287, 10)

    def test_kn_largeorder(self):
        assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)

    def test_kv_largearg(self):
        # K decays exponentially; underflows to 0 for huge arguments.
        assert_equal(special.kv(0, 1e19), 0)

    def test_negv_kve(self):
        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))

    def test_kve(self):
        # kve(v, z) = kv(v, z) * exp(z), for real and complex z.
        kve1 = special.kve(0,.2)
        kv1 = special.kv(0,.2)*exp(.2)
        assert_almost_equal(kve1,kv1,8)
        z = .2+1j
        kve2 = special.kve(0,z)
        kv2 = special.kv(0,z)*exp(z)
        assert_almost_equal(kve2,kv2,8)

    def test_kvp_v0n1(self):
        # K'_0(z) = -K_1(z).
        z = 2.2
        assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)

    def test_kvp_n1(self):
        # First derivative via the recurrence -K_{v+1} + (v/z) K_v.
        v = 3.
        z = 2.2
        xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
        x = special.kvp(v,z, n=1)
        assert_almost_equal(xc, x, 10)   # this function (kvp) is broken

    def test_kvp_n2(self):
        # Second derivative via the corresponding recurrence relation.
        v = 3.
        z = 2.2
        xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
        x = special.kvp(v, z, n=2)
        assert_almost_equal(xc, x, 10)

    def test_y0(self):
        oz = special.y0(.1)
        ozr = special.yn(0,.1)
        assert_almost_equal(oz,ozr,8)

    def test_y1(self):
        o1 = special.y1(.1)
        o1r = special.yn(1,.1)
        assert_almost_equal(o1,o1r,8)

    def test_y0_zeros(self):
        # Zeros of Y0 (real and complex) and the value of Y0' there.
        yo,ypo = special.y0_zeros(2)
        zo,zpo = special.y0_zeros(2,complex=1)
        all = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
        assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)

    def test_y1_zeros(self):
        y1 = special.y1_zeros(1)
        assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)

    def test_y1p_zeros(self):
        y1p = special.y1p_zeros(1,complex=1)
        assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)

    def test_yn_zeros(self):
        an = special.yn_zeros(4,2)
        assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
        an = special.yn_zeros(443,5)
        assert_allclose(an, [450.13573091578090314, 463.05692376675001542,
                             472.80651546418663566, 481.27353184725625838,
                             488.98055964441374646], rtol=1e-15)

    def test_ynp_zeros(self):
        ao = special.ynp_zeros(0,2)
        assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
        ao = special.ynp_zeros(43,5)
        assert_allclose(special.yvp(43, ao), 0, atol=1e-15)
        ao = special.ynp_zeros(443,5)
        assert_allclose(special.yvp(443, ao), 0, atol=1e-9)

    def test_ynp_zeros_large_order(self):
        ao = special.ynp_zeros(443,5)
        assert_allclose(special.yvp(443, ao), 0, atol=1e-14)

    def test_yn(self):
        yn2n = special.yn(1,.2)
        assert_almost_equal(yn2n,-3.3238249881118471,8)

    def test_negv_yv(self):
        assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)

    def test_yv(self):
        yv2 = special.yv(1,.2)
        assert_almost_equal(yv2,-3.3238249881118471,8)

    def test_negv_yve(self):
        assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)

    def test_yve(self):
        # yve(v, z) = yv(v, z) * exp(-|Im(z)|).
        yve2 = special.yve(1,.2)
        assert_almost_equal(yve2,-3.3238249881118471,8)
        yve2r = special.yv(1,.2+1j)*exp(-1)
        yve22 = special.yve(1,.2+1j)
        assert_almost_equal(yve22,yve2r,8)

    def test_yvp(self):
        # Y'_2(z) via the recurrence (Y_1 - Y_3)/2.
        yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
        yvp1 = special.yvp(2,.2)
        assert_array_almost_equal(yvp1,yvpr,10)

    def _cephes_vs_amos_points(self):
        """Yield points at which to compare Cephes implementation to AMOS"""
        # check several points, including large-amplitude ones
        v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301]
        z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300,
             10003]
        yield from itertools.product(v, z)

        # check half-integers; these are problematic points at least
        # for cephes/iv
        yield from itertools.product(0.5 + arange(-60, 60), [3.5])

    def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
        # f1 is the Cephes (real) routine, f2 the integer-order routine;
        # f1 with a complex argument dispatches to AMOS.
        for v, z in self._cephes_vs_amos_points():
            if skip is not None and skip(v, z):
                continue
            c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
            if np.isinf(c1):
                assert_(np.abs(c2) >= 1e300, (v, z))
            elif np.isnan(c1):
                assert_(c2.imag != 0, (v, z))
            else:
                assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
                if v == int(v):
                    assert_allclose(c3, c2, err_msg=(v, z),
                                    rtol=rtol, atol=atol)

    @pytest.mark.xfail(platform.machine() == 'ppc64le',
                       reason="fails on ppc64le")
    def test_jv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)

    @pytest.mark.xfail(platform.machine() == 'ppc64le',
                       reason="fails on ppc64le")
    def test_yv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)

    def test_yv_cephes_vs_amos_only_small_orders(self):
        skipper = lambda v, z: (abs(v) > 50)
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)

    def test_iv_cephes_vs_amos(self):
        with np.errstate(all='ignore'):
            self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)

    @pytest.mark.slow
    def test_iv_cephes_vs_amos_mass_test(self):
        # Randomized mass comparison of real vs complex iv over a wide
        # dynamic range of orders and arguments.
        N = 1000000
        np.random.seed(1)
        v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
        x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)

        imsk = (np.random.randint(8, size=N) == 0)
        v[imsk] = v[imsk].astype(int)

        with np.errstate(all='ignore'):
            c1 = special.iv(v, x)
            c2 = special.iv(v, x+0j)

            # deal with differences in the inf and zero cutoffs
            c1[abs(c1) > 1e300] = np.inf
            c2[abs(c2) > 1e300] = np.inf
            c1[abs(c1) < 1e-300] = 0
            c2[abs(c2) < 1e-300] = 0

            dc = abs(c1/c2 - 1)
            dc[np.isnan(dc)] = 0

        k = np.argmax(dc)

        # Most error apparently comes from AMOS and not our implementation;
        # there are some problems near integer orders there
        assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))

    def test_kv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
        self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)

    def test_ticket_623(self):
        assert_allclose(special.jv(3, 4), 0.43017147387562193)
        assert_allclose(special.jv(301, 1300), 0.0183487151115275)
        assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048)

    def test_ticket_853(self):
        """Negative-order Bessels"""
        # cephes
        assert_allclose(special.jv(-1, 1), -0.4400505857449335)
        assert_allclose(special.jv(-2, 1), 0.1149034849319005)
        assert_allclose(special.yv(-1, 1), 0.7812128213002887)
        assert_allclose(special.yv(-2, 1), -1.650682606816255)
        assert_allclose(special.iv(-1, 1), 0.5651591039924851)
        assert_allclose(special.iv(-2, 1), 0.1357476697670383)
        assert_allclose(special.kv(-1, 1), 0.6019072301972347)
        assert_allclose(special.kv(-2, 1), 1.624838898635178)
        assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952)
        assert_allclose(special.yv(-0.5, 1), 0.6713967071418031)
        assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
        assert_allclose(special.kv(-0.5, 1), 0.4610685044478945)
        # amos
        assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335)
        assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005)
        assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887)
        assert_allclose(special.yv(-2, 1+0j), -1.650682606816255)

        assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851)
        assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383)
        assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347)
        assert_allclose(special.kv(-2, 1+0j), 1.624838898635178)

        assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952)
        assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
        assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031)
        assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)

        assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967)
        assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
        assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945)
        assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)

        assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
        assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
        assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
        assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))

        assert_allclose(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
        assert_allclose(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))

    def test_ticket_854(self):
        """Real-valued Bessel domains"""
        # Negative real arguments with non-integer order lie outside the
        # real-valued domain and must return nan.
        assert_(isnan(special.jv(0.5, -1)))
        assert_(isnan(special.iv(0.5, -1)))
        assert_(isnan(special.yv(0.5, -1)))
        assert_(isnan(special.yv(1, -1)))
        assert_(isnan(special.kv(0.5, -1)))
        assert_(isnan(special.kv(1, -1)))
        assert_(isnan(special.jve(0.5, -1)))
        assert_(isnan(special.ive(0.5, -1)))
        assert_(isnan(special.yve(0.5, -1)))
        assert_(isnan(special.yve(1, -1)))
        assert_(isnan(special.kve(0.5, -1)))
        assert_(isnan(special.kve(1, -1)))
        assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
        assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))

    def test_gh_7909(self):
        # K_v diverges at z = 0.
        assert_(special.kv(1.5, 0) == np.inf)
        assert_(special.kve(1.5, 0) == np.inf)

    def test_ticket_503(self):
        """Real-valued Bessel I overflow"""
        assert_allclose(special.iv(1, 700), 1.528500390233901e302)
        assert_allclose(special.iv(1000, 1120), 1.301564549405821e301)

    def test_iv_hyperg_poles(self):
        assert_allclose(special.iv(-0.5, 1), 1.231200214592967)

    def iv_series(self, v, z, n=200):
        # Truncated power series for I_v(z) plus an error estimate, used
        # as an independent reference for i0/i1/iv below.
        # NOTE(review): np.float_ is a deprecated alias in newer NumPy —
        # confirm against the project's supported NumPy range.
        k = arange(0, n).astype(float_)
        r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
        r[isnan(r)] = inf
        r = exp(r)
        err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
        return r.sum(), err

    def test_i0_series(self):
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(0, z)
            assert_allclose(special.i0(z), value, atol=err, err_msg=z)

    def test_i1_series(self):
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(1, z)
            assert_allclose(special.i1(z), value, atol=err, err_msg=z)

    def test_iv_series(self):
        for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
            for z in [1., 10., 200.5, -1+2j]:
                value, err = self.iv_series(v, z)
                assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z))

    def test_i0(self):
        # Reference values for exp(-x) * I0(x).
        values = [[0.0, 1.0],
                  [1e-10, 1.0],
                  [0.1, 0.9071009258],
                  [0.5, 0.6450352706],
                  [1.0, 0.4657596077],
                  [2.5, 0.2700464416],
                  [5.0, 0.1835408126],
                  [20.0, 0.0897803119],
                  ]
        for i, (x, v) in enumerate(values):
            cv = special.i0(x) * exp(-x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_i0e(self):
        oize = special.i0e(.1)
        oizer = special.ive(0,.1)
        assert_almost_equal(oize,oizer,8)

    def test_i1(self):
        # Reference values for exp(-x) * I1(x).
        values = [[0.0, 0.0],
                  [1e-10, 0.4999999999500000e-10],
                  [0.1, 0.0452984468],
                  [0.5, 0.1564208032],
                  [1.0, 0.2079104154],
                  [5.0, 0.1639722669],
                  [20.0, 0.0875062222],
                  ]
        for i, (x, v) in enumerate(values):
            cv = special.i1(x) * exp(-x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_i1e(self):
        oi1e = special.i1e(.1)
        oi1er = special.ive(1,.1)
        assert_almost_equal(oi1e,oi1er,8)

    def test_iti0k0(self):
        iti0 = array(special.iti0k0(5))
        assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)

    def test_it2i0k0(self):
        it2k = special.it2i0k0(.1)
        assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)

    def test_iv(self):
        iv1 = special.iv(0,.1)*exp(-.1)
        assert_almost_equal(iv1,0.90710092578230106,10)

    def test_negv_ive(self):
        assert_equal(special.ive(3,2), special.ive(-3,2))

    def test_ive(self):
        # ive(v, x) = iv(v, x) * exp(-|Re(x)|) for real x.
        ive1 = special.ive(0,.1)
        iv1 = special.iv(0,.1)*exp(-.1)
        assert_almost_equal(ive1,iv1,10)

    def test_ivp0(self):
        # I'_0(z) = I_1(z).
        assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)

    def test_ivp(self):
        # I'_1(z) via the recurrence (I_0 + I_2)/2.
        y = (special.iv(0,2) + special.iv(2,2))/2
        x = special.ivp(1,2)
        assert_almost_equal(x,y,10)
class TestLaguerre:
    """Checks of the (generalized) Laguerre polynomial coefficient tables."""
    def test_laguerre(self):
        # Coefficients of L0..L5 against their closed-form expansions.
        lag0 = special.laguerre(0)
        lag1 = special.laguerre(1)
        lag2 = special.laguerre(2)
        lag3 = special.laguerre(3)
        lag4 = special.laguerre(4)
        lag5 = special.laguerre(5)
        assert_array_almost_equal(lag0.c,[1],13)
        assert_array_almost_equal(lag1.c,[-1,1],13)
        assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
        assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
        assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
        assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
    def test_genlaguerre(self):
        # Random alpha parameter k drawn from (-0.9, 4.1).
        k = 5*np.random.random() - 0.9
        lag0 = special.genlaguerre(0,k)
        lag1 = special.genlaguerre(1,k)
        lag2 = special.genlaguerre(2,k)
        lag3 = special.genlaguerre(3,k)
        assert_equal(lag0.c,[1])
        assert_equal(lag1.c,[-1,k+1])
        assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
        assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abrahmowitz and Stegan
class TestLegendre:
    def test_legendre(self):
        """Coefficients of P0..P5 against the tabulated closed forms."""
        polys = [special.legendre(n) for n in range(6)]
        assert_equal(polys[0].c, [1])
        assert_equal(polys[1].c, [1,0])
        assert_almost_equal(polys[2].c, array([3,0,-1])/2.0, decimal=13)
        assert_almost_equal(polys[3].c, array([5,0,-3,0])/2.0)
        assert_almost_equal(polys[4].c, array([35,0,-30,0,3])/8.0)
        assert_almost_equal(polys[5].c, array([63,0,-70,0,15,0])/8.0)
class TestLambda:
    def test_lmbda(self):
        # Compare lmbda(1, 0.1) against expressions built directly from
        # jn/jv/jvp (lambda_v(x) = Gamma(v+1)*J_v(x)/(x/2)^v for v=0,1).
        lam = special.lmbda(1,.1)
        lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
                array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
        assert_array_almost_equal(lam,lamr,8)
class TestLog1p:
    def test_log1p(self):
        """log1p(x) agrees with log(1 + x) for moderate inputs."""
        observed = tuple(special.log1p(v) for v in (10, 11, 12))
        expected = (log(11), log(12), log(13))
        assert_array_almost_equal(observed, expected, 8)
    def test_log1pmore(self):
        """Same check for non-integer inputs just above 1."""
        observed = tuple(special.log1p(v) for v in (1, 1.1, 1.2))
        expected = (log(2), log(2.1), log(2.2))
        assert_array_almost_equal(observed, expected, 8)
class TestLegendreFunctions:
    """Tests for the associated Legendre functions (lpmn/clpmn/lpmv/lqmn/...)."""
    def test_clpmn(self):
        # Complex associated Legendre values/derivatives against closed forms.
        z = 0.5+0.3j
        clp = special.clpmn(2, 2, z, 3)
        assert_array_almost_equal(clp,
                   (array([[1.0000, z, 0.5*(3*z*z-1)],
                           [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
                           [0.0000, 0.0000, 3*(z*z-1)]]),
                    array([[0.0000, 1.0000, 3*z],
                           [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
                           [0.0000, 0.0000, 6*z]])),
                    7)
    def test_clpmn_close_to_real_2(self):
        # Just above/below the real axis, type-2 clpmn should match lpmv.
        eps = 1e-10
        m = 1
        n = 3
        x = 0.5
        clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
        clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
        assert_array_almost_equal(array([clp_plus, clp_minus]),
                                  array([special.lpmv(m, n, x),
                                         special.lpmv(m, n, x)]),
                                  7)
    def test_clpmn_close_to_real_3(self):
        # Type-3 clpmn picks up a phase factor exp(-/+ i*m*pi/2) near the axis.
        eps = 1e-10
        m = 1
        n = 3
        x = 0.5
        clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
        clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
        assert_array_almost_equal(array([clp_plus, clp_minus]),
                                  array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
                                         special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
                                  7)
    def test_clpmn_across_unit_circle(self):
        # Values should be continuous across the unit circle for both types.
        eps = 1e-7
        m = 1
        n = 1
        x = 1j
        for type in [2, 3]:
            assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
                            special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
    def test_inf(self):
        # Derivatives blow up at z = +/-1 for m >= 1; expect infinities.
        for z in (1, -1):
            for n in range(4):
                for m in range(1, n):
                    lp = special.clpmn(m, n, z)
                    assert_(np.isinf(lp[1][1,1:]).all())
                    lp = special.lpmn(m, n, z)
                    assert_(np.isinf(lp[1][1,1:]).all())
    def test_deriv_clpmn(self):
        # Central finite differences along real and imaginary directions
        # should reproduce the returned derivative array.
        # data inside and outside of the unit circle
        zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
                 1+1j, -1+1j, -1-1j, 1-1j]
        m = 2
        n = 3
        for type in [2, 3]:
            for z in zvals:
                for h in [1e-3, 1e-3j]:
                    approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
                                         - special.clpmn(m, n, z-0.5*h, type)[0])/h
                    assert_allclose(special.clpmn(m, n, z, type)[1],
                                    approx_derivative,
                                    rtol=1e-4)
    def test_lpmn(self):
        # lpmn(0, 2, .5): values and derivatives of P_0..P_2 at x = 0.5.
        lp = special.lpmn(0,2,.5)
        assert_array_almost_equal(lp,(array([[1.00000,
                                              0.50000,
                                              -0.12500]]),
                                      array([[0.00000,
                                              1.00000,
                                              1.50000]])),4)
    def test_lpn(self):
        # lpn should give the same values/derivatives as the m=0 row of lpmn.
        lpnf = special.lpn(2,.5)
        assert_array_almost_equal(lpnf,(array([1.00000,
                                               0.50000,
                                               -0.12500]),
                                        array([0.00000,
                                               1.00000,
                                               1.50000])),4)
    def test_lpmv(self):
        lp = special.lpmv(0,2,.5)
        assert_almost_equal(lp,-0.125,7)
        lp = special.lpmv(0,40,.001)
        assert_almost_equal(lp,0.1252678976534484,7)
        # XXX: this is outside the domain of the current implementation,
        # so ensure it returns a NaN rather than a wrong answer.
        with np.errstate(all='ignore'):
            lp = special.lpmv(-1,-1,.001)
        assert_(lp != 0 or np.isnan(lp))
    def test_lqmn(self):
        # The m=0 row of lqmn must match lqn.
        lqmnf = special.lqmn(0,2,.5)
        lqf = special.lqn(2,.5)
        assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
        assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
    def test_lqmn_gt1(self):
        """algorithm for real arguments changes at 1.0001
        test against analytical result for m=2, n=1
        """
        x0 = 1.0001
        delta = 0.00002
        for x in (x0-delta, x0+delta):
            lq = special.lqmn(2, 1, x)[0][-1, -1]
            expected = 2/(x*x-1)
            assert_almost_equal(lq, expected)
    def test_lqmn_shape(self):
        # Returned arrays are (m+1, n+1), with n clamped to at least 0.
        a, b = special.lqmn(4, 4, 1.1)
        assert_equal(a.shape, (5, 5))
        assert_equal(b.shape, (5, 5))
        a, b = special.lqmn(4, 0, 1.1)
        assert_equal(a.shape, (5, 1))
        assert_equal(b.shape, (5, 1))
    def test_lqn(self):
        # Legendre functions of the second kind at x = 0.5.
        lqf = special.lqn(2,.5)
        assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
                                       array([1.3333, 1.216, -0.8427])),4)
class TestMathieu:
    """Placeholder tests for the Mathieu functions (mostly smoke tests)."""
    def test_mathieu_a(self):
        pass
    def test_mathieu_even_coef(self):
        # Smoke test only: check the call does not raise.
        special.mathieu_even_coef(2,5)
        # Q not defined broken and cannot figure out proper reporting order
    def test_mathieu_odd_coef(self):
        # same problem as above
        pass
class TestFresnelIntegral:
    """Placeholder tests for the modified Fresnel integrals (not implemented)."""
    def test_modfresnelp(self):
        pass
    def test_modfresnelm(self):
        pass
class TestOblCvSeq:
    def test_obl_cv_seq(self):
        # Oblate spheroidal characteristic values against fixed references.
        obl = special.obl_cv_seq(0,3,1)
        assert_array_almost_equal(obl,array([-0.348602,
                                             1.393206,
                                             5.486800,
                                             11.492120]),5)
class TestParabolicCylinder:
    """Tests for the parabolic cylinder functions pbdv/pbvv/pbdn_seq."""
    def test_pbdn_seq(self):
        pb = special.pbdn_seq(1,.1)
        assert_array_almost_equal(pb,(array([0.9975,
                                             0.0998]),
                                      array([-0.0499,
                                              0.9925])),4)
    def test_pbdv(self):
        # NOTE(review): this computes an expression but asserts nothing -
        # looks like a lost assertion (recurrence check?); TODO confirm
        # the intended identity before adding one.
        special.pbdv(1,.2)
        1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
    def test_pbdv_seq(self):
        # pbdv_seq should agree with the real part of pbdn_seq.
        pbn = special.pbdn_seq(1,.1)
        pbv = special.pbdv_seq(1,.1)
        assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
    def test_pbdv_points(self):
        # simple case
        eta = np.linspace(-10, 10, 5)
        z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
        assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
        # some points
        assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
        assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
    def test_pbdv_gradient(self):
        # Central finite differences should reproduce the returned derivative.
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]
        p = special.pbdv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
        assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
    def test_pbvv_gradient(self):
        # Same finite-difference check for the V function.
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]
        p = special.pbvv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
        assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma:
    # from Table 6.2 (pg. 271) of A&S
    def test_polygamma(self):
        """Check polygamma against A&S reference values, the psi
        equivalence for order 0, and broadcasting over order/argument."""
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)
        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))
        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        # np.row_stack was removed in NumPy 2.0; np.vstack is the
        # long-standing equivalent and behaves identically here.
        expected = np.vstack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.vstack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.vstack([n]*2), x),
                            expected)
class TestProCvSeq:
    def test_pro_cv_seq(self):
        # Prolate spheroidal characteristic values against fixed references.
        prol = special.pro_cv_seq(0,3,1)
        assert_array_almost_equal(prol,array([0.319000,
                                              2.593084,
                                              6.533471,
                                              12.514462]),5)
class TestPsi:
    def test_psi(self):
        """psi(1) equals minus the Euler-Mascheroni constant."""
        result = special.psi(1)
        assert_almost_equal(result, -0.57721566490153287, 8)
class TestRadian:
    def test_radian(self):
        """90 degrees, 0 minutes, 0 seconds is pi/2 radians."""
        assert_almost_equal(special.radian(90, 0, 0), pi/2.0, 5)
    def test_radianmore(self):
        """90 deg 1 min 60 sec: slightly more than pi/2."""
        assert_almost_equal(special.radian(90, 1, 60),
                            pi/2 + 0.0005816135199345904, 5)
class TestRiccati:
    """Riccati-Bessel functions against x*j_n(x) / x*y_n(x) and derivatives."""
    def test_riccati_jn(self):
        # NOTE: after the loop n == N-1 == 1, so riccati_jn(n, x) returns
        # arrays of length n+1 == N; this relies on N being 2.
        N, x = 2, 0.2
        S = np.empty((N, N))
        for n in range(N):
            j = special.spherical_jn(n, x)
            jp = special.spherical_jn(n, x, derivative=True)
            S[0,n] = x*j
            S[1,n] = x*jp + j
        assert_array_almost_equal(S, special.riccati_jn(n, x), 8)
    def test_riccati_yn(self):
        # Same construction for the second kind (spherical_yn based).
        N, x = 2, 0.2
        C = np.empty((N, N))
        for n in range(N):
            y = special.spherical_yn(n, x)
            yp = special.spherical_yn(n, x, derivative=True)
            C[0,n] = x*y
            C[1,n] = x*yp + y
        assert_array_almost_equal(C, special.riccati_yn(n, x), 8)
class TestRound:
    def test_round(self):
        # Round-half-to-even: 10.5 is expected to round down to 10.
        rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
        # Note: According to the documentation, scipy.special.round is
        # supposed to round to the nearest even number if the fractional
        # part is exactly 0.5. On some platforms, this does not appear
        # to work and thus this test may fail. However, this unit test is
        # correctly written.
        rndrl = (10,10,10,11)
        assert_array_equal(rnd,rndrl)
def test_sph_harm():
    # Tests derived from tables in
    # https://en.wikipedia.org/wiki/Table_of_spherical_harmonics
    # Local aliases deliberately shadow the module-level numpy names
    # within this test only.
    sh = special.sph_harm
    pi = np.pi
    exp = np.exp
    sqrt = np.sqrt
    sin = np.sin
    cos = np.cos
    assert_array_almost_equal(sh(0,0,0,0),
                              0.5/sqrt(pi))
    assert_array_almost_equal(sh(-2,2,0.,pi/4),
                              0.25*sqrt(15./(2.*pi)) *
                              (sin(pi/4))**2.)
    assert_array_almost_equal(sh(-2,2,0.,pi/2),
                              0.25*sqrt(15./(2.*pi)))
    assert_array_almost_equal(sh(2,2,pi,pi/2),
                              0.25*sqrt(15/(2.*pi)) *
                              exp(0+2.*pi*1j)*sin(pi/2.)**2.)
    assert_array_almost_equal(sh(2,4,pi/4.,pi/3.),
                              (3./8.)*sqrt(5./(2.*pi)) *
                              exp(0+2.*pi/4.*1j) *
                              sin(pi/3.)**2. *
                              (7.*cos(pi/3.)**2.-1))
    assert_array_almost_equal(sh(4,4,pi/8.,pi/6.),
                              (3./16.)*sqrt(35./(2.*pi)) *
                              exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
    # sph_harm must always pick the complex128 ufunc loop, regardless of
    # which argument is array-like.
    # see https://github.com/scipy/scipy/issues/4895
    dt = np.dtype(np.complex128)
    assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
    assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
    assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
    assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
    assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
    assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestStruve:
    def _series(self, v, z, n=100):
        """Compute Struve function & error estimate from its power series."""
        k = arange(0, n)
        r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
        # np.float_ was removed in NumPy 2.0; builtin float is the same dtype.
        err = abs(r).max() * finfo(float).eps * n
        return r.sum(), err
    def test_vs_series(self):
        """Check Struve function versus its power series"""
        for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
            for z in [1, 10, 19, 21, 30]:
                value, err = self._series(v, z)
                # The original appended a bare ", (v, z)" tuple after the
                # call, which was a no-op; report the failing point through
                # err_msg instead.
                assert_allclose(special.struve(v, z), value, rtol=0, atol=err,
                                err_msg=str((v, z)))
    def test_some_values(self):
        # Spot checks, including the reflection behavior at negative z.
        assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
        assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
        assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
        assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
        assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
        assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
        assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
        assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
        # Non-integer order with negative argument is out of domain -> nan.
        assert_(isnan(special.struve(-7.1, -1)))
        assert_(isnan(special.struve(-10.1, -1)))
    def test_regression_679(self):
        """Regression test for #679"""
        assert_allclose(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
        assert_allclose(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
        assert_allclose(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
    # Chi-square CDF with a small (non-integer) degrees-of-freedom value.
    assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_ch2_inf():
    # CDF at x = inf is exactly 1.
    assert_equal(special.chdtr(0.7,np.inf), 1.0)
def test_chi2c_smalldf():
    # chdtrc is the complement of chdtr.
    assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
    # chdtri inverts chdtrc back to the original x.
    assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
    """Arithmetic-geometric mean: known values, broadcasting, extreme
    magnitudes, and zero/nan/inf special cases."""
    rtol = 1e-13
    # Gauss's constant
    assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186,
                    rtol=rtol)
    # These values were computed using Wolfram Alpha, with the
    # function ArithmeticGeometricMean[a, b].
    agm13 = 1.863616783244897
    agm15 = 2.604008190530940
    agm35 = 3.936235503649555
    assert_allclose(special.agm([[1], [3]], [1, 3, 5]),
                    [[1, agm13, agm15],
                     [agm13, 3, agm35]], rtol=rtol)
    # Computed by the iteration formula using mpmath,
    # with mpmath.mp.prec = 1000:
    agm12 = 1.4567910310469068
    assert_allclose(special.agm(1, 2), agm12, rtol=rtol)
    assert_allclose(special.agm(2, 1), agm12, rtol=rtol)
    # agm is odd under simultaneous negation of both arguments.
    assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol)
    assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol)
    assert_allclose(special.agm(13, 123456789.5), 11111458.498599306,
                    rtol=rtol)
    assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol)
    assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol)
    assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178,
                    rtol=rtol)
    assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177,
                    rtol=rtol)
    assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152,
                    rtol=rtol)
    # No overflow/underflow across the full finite float range.
    fi = np.finfo(1.0)
    assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305,
                    rtol=rtol)
    assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308,
                    rtol=rtol)
    assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308,
                    rtol=rtol)
    # zero, nan and inf cases.
    assert_equal(special.agm(0, 0), 0)
    assert_equal(special.agm(99, 0), 0)
    assert_equal(special.agm(-1, 10), np.nan)
    assert_equal(special.agm(0, np.inf), np.nan)
    assert_equal(special.agm(np.inf, 0), np.nan)
    assert_equal(special.agm(0, -np.inf), np.nan)
    assert_equal(special.agm(-np.inf, 0), np.nan)
    assert_equal(special.agm(np.inf, -np.inf), np.nan)
    assert_equal(special.agm(-np.inf, np.inf), np.nan)
    assert_equal(special.agm(1, np.nan), np.nan)
    assert_equal(special.agm(np.nan, -1), np.nan)
    assert_equal(special.agm(1, np.inf), np.inf)
    assert_equal(special.agm(np.inf, 1), np.inf)
    assert_equal(special.agm(-1, -np.inf), -np.inf)
    assert_equal(special.agm(-np.inf, -1), -np.inf)
def test_legacy():
    # Legacy behavior: truncating arguments to integers
    # Each function must truncate (not round) float order/count arguments,
    # emitting only the suppressed RuntimeWarning.
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "floating point number truncated to an integer")
        assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
        assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
        assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
        assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
        assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
        assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
        assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
        assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
        assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
    # With special-function error raising enabled, iv on an overflowing
    # complex argument must raise rather than silently return inf/nan.
    assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j)
def test_xlogy():
    def expected(x, y):
        # xlogy(0, y) is defined to be 0 for any non-nan y.
        with np.errstate(invalid='ignore'):
            return x if x == 0 and not np.isnan(y) else x*np.log(y)
    real_pts = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)],
                          dtype=float)
    complex_pts = np.r_[real_pts, [(0, 1j), (1, 1j)]]
    # Check the real-valued points first, then the complex extension.
    for pts in (real_pts, complex_pts):
        ref = np.vectorize(expected)(pts[:,0], pts[:,1])
        assert_func_equal(special.xlogy, ref, pts, rtol=1e-13, atol=1e-13)
def test_xlog1py():
    def expected(x, y):
        # xlog1py(0, y) is defined to be 0 for any non-nan y.
        with np.errstate(invalid='ignore'):
            return x if x == 0 and not np.isnan(y) else x*np.log1p(y)
    pts = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
                      (1, 1e-30)], dtype=float)
    ref = np.vectorize(expected)(pts[:,0], pts[:,1])
    assert_func_equal(special.xlog1py, ref, pts, rtol=1e-13, atol=1e-13)
def test_entr():
    def expected(x):
        # entr(x) = -x*log(x) for x >= 0 and -inf for negative x.
        if x < 0:
            return -np.inf
        return -special.xlogy(x, x)
    values = (0, 0.5, 1.0, np.inf)
    # Same points as itertools.product([-1, 1], values), flattened.
    pts = np.array([sgn * v for sgn in [-1, 1] for v in values], dtype=float)
    ref = np.vectorize(expected, otypes=[np.float64])(pts)
    assert_func_equal(special.entr, ref, pts, rtol=1e-13, atol=1e-13)
def test_kl_div():
    # Reference implementation of the Kullback-Leibler divergence integrand,
    # extended beyond the natural domain to preserve convexity.
    def xfunc(x, y):
        if x < 0 or y < 0 or (y == 0 and x != 0):
            # extension of natural domain to preserve convexity
            return np.inf
        elif np.isposinf(x) or np.isposinf(y):
            # limits within the natural domain
            return np.inf
        elif x == 0:
            return y
        else:
            return special.xlogy(x, x/y) - x + y
    values = (0, 0.5, 1.0)
    signs = [-1, 1]
    arr = []
    # All sign/value combinations for both arguments.
    for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
        arr.append((sgna*va, sgnb*vb))
    z = np.array(arr, dtype=float)
    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
    assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
    # Reference implementation of the relative-entropy integrand:
    # x*log(x/y) on the natural domain, 0 when x == 0, inf elsewhere.
    def xfunc(x, y):
        if x > 0 and y > 0:
            return special.xlogy(x, x/y)
        elif x == 0 and y >= 0:
            return 0
        else:
            return np.inf
    values = (0, 0.5, 1.0)
    signs = [-1, 1]
    arr = []
    # All sign/value combinations for both arguments.
    for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
        arr.append((sgna*va, sgnb*vb))
    z = np.array(arr, dtype=float)
    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
    assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
    # Spot checks: negative delta is invalid (inf), quadratic branch for
    # |r| < delta, linear branch otherwise.
    assert_equal(special.huber(-1, 1.5), np.inf)
    assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
    assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
    # Reference implementation of the Huber loss.
    def xfunc(delta, r):
        if delta < 0:
            return np.inf
        elif np.abs(r) < delta:
            return 0.5 * np.square(r)
        else:
            return delta * (np.abs(r) - 0.5 * delta)
    z = np.random.randn(10, 2)
    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
    assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
    # Reference implementation of the pseudo-Huber loss; the two appended
    # fixed points exercise the delta == 0 and r == 0 short-circuits.
    def xfunc(delta, r):
        if delta < 0:
            return np.inf
        elif (not delta) or (not r):
            return 0
        else:
            return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
    z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
    assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber_small_r():
    """No catastrophic cancellation for tiny r (sqrt(1+r^2)-1 ~ r^2/2)."""
    # expected computed with mpmath:
    # import mpmath
    # mpmath.mp.dps = 200
    # r = mpmath.mpf(1e-18)
    # expected = float(mpmath.sqrt(1 + r**2) - 1)
    expected = 5.0000000000000005e-37
    actual = special.pseudo_huber(1.0, 1e-18)
    assert_allclose(actual, expected, rtol=1e-13)
def test_runtime_warning():
    # Requesting far more Mathieu coefficients than can be predicted should
    # emit a RuntimeWarning rather than fail silently.
    with pytest.warns(RuntimeWarning,
                      match=r'Too many predicted coefficients'):
        mathieu_odd_coef(1000, 1000)
    with pytest.warns(RuntimeWarning,
                      match=r'Too many predicted coefficients'):
        mathieu_even_coef(1000, 1000)
|
scipy/scipy
|
scipy/special/tests/test_basic.py
|
Python
|
bsd-3-clause
| 144,169
|
[
"Elk"
] |
7b806fd7597e79a9d8ccd618da160cb51b1a385f3210eaa68c3342ac456111db
|
#!/usr/bin/python
#
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <dholth@stetson.edu> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
import struct
import math
import sys
import string
import pprint
from kivy.compat import string_types
def hexDump(data):
    """Useful utility; prints the string in hexadecimal.

    Accepts either ``str`` (the historical Python 2 usage) or ``bytes``
    (under Python 3, indexing bytes yields ints, so ord() would raise).
    """
    def _byte(c):
        # bytes indexing already gives an int; str indexing gives a char.
        return c if isinstance(c, int) else ord(c)
    for i in range(len(data)):
        sys.stdout.write("%2x " % (_byte(data[i]),))
        if (i+1) % 8 == 0:
            print(repr(data[i-7:i+1]))
    if(len(data) % 8 != 0):
        print(str.rjust("", 11), repr(data[i-len(data) % 8:i + 1]))
class OSCMessage:
    """Builds typetagged OSC messages.

    Accumulates an address, a typetag string (always starting with ",")
    and the packed binary payload; getBinary() assembles the full message.
    """
    def __init__(self):
        self.address = ""
        self.clearData()
    def setAddress(self, address):
        """Set the OSC address pattern (e.g. "/foo/play")."""
        self.address = address
    def setMessage(self, message):
        """Replace the raw binary payload wholesale."""
        self.message = message
    def setTypetags(self, typetags):
        """Replace the typetag string wholesale."""
        self.typetags = typetags
    def clear(self):
        """Reset address and payload."""
        self.address = ""
        self.clearData()
    def clearData(self):
        """Reset typetags to the leading "," and empty the payload."""
        self.typetags = ","
        self.message = bytes()
    def append(self, argument, typehint=None):
        """Appends data to the message,
        updating the typetags based on
        the argument's type.
        If the argument is a blob (counted string)
        pass in 'b' as typehint."""
        if typehint == 'b':
            binary = OSCBlob(argument)
        else:
            binary = OSCArgument(argument)
        self.typetags = self.typetags + binary[0]
        self.rawAppend(binary[1])
    def rawAppend(self, data):
        """Appends raw data to the message. Use append()."""
        self.message = self.message + data
    def getBinary(self):
        """Returns the binary message (so far) with typetags."""
        address = OSCArgument(self.address)[1]
        typetags = OSCArgument(self.typetags)[1]
        return address + typetags + self.message
    def __repr__(self):
        # __repr__ must return str under Python 3; getBinary() yields the
        # raw binary message, so show its repr instead of returning it.
        return repr(self.getBinary())
def readString(data):
    """Return (string, remainder) for a null-terminated OSC string.

    OSC strings are padded to a 4-byte boundary (including the
    terminating null), so the remainder starts at the next boundary.
    """
    # string.find() was removed in Python 3; the str method is equivalent.
    length = data.find("\0")
    nextData = int(math.ceil((length+1) / 4.0) * 4)
    return (data[0:length], data[nextData:])
def readBlob(data):
    """Return (blob, remainder) for a length-prefixed, 4-byte-aligned
    OSC blob at the start of *data*."""
    (length,) = struct.unpack(">i", data[0:4])
    end = int(math.ceil(length / 4.0) * 4) + 4
    return (data[4:4 + length], data[end:])
def readInt(data):
    """Consume a big-endian 32-bit int; returns (value, remainder).

    On underflow (< 4 bytes) an error is reported and (0, data) is
    returned unchanged.
    """
    if len(data) < 4:
        print("Error: too few bytes for int", data, len(data))
        return (0, data)
    (value,) = struct.unpack(">i", data[0:4])
    return (value, data[4:])
def readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer."""
    # The old (high << 32) + low reconstruction used two *signed* 32-bit
    # words, which mis-decoded any value whose low word had bit 31 set;
    # ">q" decodes the int64 in one step, correctly.
    big = struct.unpack(">q", data[0:8])[0]
    rest = data[8:]
    return (big, rest)
def readDouble(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit double float."""
    (value,) = struct.unpack(">d", data[0:8])
    return (float(value), data[8:])
def readFloat(data):
    """Consume a big-endian 32-bit float; returns (value, remainder).

    On underflow (< 4 bytes) an error is reported and (0, data) is
    returned unchanged, mirroring readInt.
    """
    if len(data) < 4:
        print("Error: too few bytes for float", data, len(data))
        return (0, data)
    # Local renamed: the original shadowed the builtin ``float``.
    value = struct.unpack(">f", data[0:4])[0]
    return (value, data[4:])
def OSCBlob(next):
    """Convert a string into an OSC Blob,
    returning a (typetag, data) tuple.

    Accepts ``str`` or ``bytes``; text is UTF-8 encoded so struct.pack
    receives bytes under Python 3 (it required str under Python 2).
    Non-string input yields the empty ('', '') pair, as before.
    """
    if isinstance(next, (str, bytes)):
        if not isinstance(next, bytes):
            next = next.encode("utf-8")
        length = len(next)
        # Blobs are padded with nulls to the next 4-byte boundary.
        padded = math.ceil((len(next)) / 4.0) * 4
        binary = struct.pack(">i%ds" % (padded), length, next)
        tag = 'b'
    else:
        tag = ''
        binary = ''
    return (tag, binary)
def OSCArgument(data):
    """Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.

    Strings are null-padded to a 4-byte boundary; floats pack as
    big-endian float32, ints as big-endian int32; anything else maps to
    the empty ('', '') pair.
    """
    if isinstance(data, string_types):
        # Padded length counts the terminating null. NOTE(review): the
        # length is computed from the character count before encoding -
        # assumes effectively ASCII content; non-ASCII text would need the
        # encoded byte length instead. TODO confirm against senders.
        OSCstringLength = math.ceil((len(data)+1) / 4.0) * 4
        if not isinstance(data, bytes):
            # struct.pack(">Ns") requires bytes under Python 3.
            data = data.encode("utf-8")
        binary = struct.pack(">%ds" % (OSCstringLength), data)
        tag = "s"
    elif isinstance(data, float):
        binary = struct.pack(">f", data)
        tag = "f"
    elif isinstance(data, int):
        binary = struct.pack(">i", data)
        tag = "i"
    else:
        binary = ""
        tag = ""
    return (tag, binary)
def parseArgs(args):
    """Given a list of strings, produces a list
    where those strings have been parsed (where
    possible) as floats or integers.

    A token that parses as a float but contains no "." is returned as an
    int; anything non-numeric is kept as the stripped string.
    """
    parsed = []
    for arg in args:
        arg = arg.strip()
        try:
            interpretation = float(arg)
            # string.find() was removed in Python 3; use the str method.
            if arg.find(".") == -1:
                interpretation = int(interpretation)
        except ValueError:
            # Not numeric - keep it as a string. (The original bare
            # ``except`` could also swallow unrelated errors, and a
            # leftover debug print spammed stdout for every token.)
            interpretation = arg
        parsed.append(interpretation)
    return parsed
def decodeOSC(data):
    """Converts a typetagged OSC message to a Python list."""
    # Dispatch table from OSC typetag character to the reader for that type.
    table = { "i" : readInt, "f" : readFloat, "s" : readString, "b" : readBlob, "d" : readDouble }
    decoded = []
    address, rest = readString(data)
    typetags = ""
    if address == "#bundle":
        # Bundles: 8-byte time tag, then length-prefixed sub-messages,
        # each decoded recursively.
        time, rest = readLong(rest)
        # decoded.append(address)
        # decoded.append(time)
        while len(rest)>0:
            length, rest = readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest) > 0:
        # Plain message: typetag string, then one value per tag character.
        # NOTE(review): an empty typetag string would make typetags[0]
        # raise IndexError rather than hit the warning branch.
        typetags, rest = readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags[0] == ",":
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            print("Oops, typetag lacks the magic ,")
    return decoded
class CallbackManager:
    """This utility class maps OSC addresses to callables.
    The CallbackManager calls its callbacks with a list
    of decoded OSC arguments, including the address and
    the typetags as the first two arguments."""
    def __init__(self):
        self.callbacks = {}
        self.add(self.unbundler, "#bundle")
    def handle(self, data, source = None):
        """Given OSC data, tries to call the callback with the
        right address."""
        decoded = decodeOSC(data)
        self.dispatch(decoded, source)
    def dispatch(self, message, source = None):
        """Sends decoded OSC data to an appropriate calback"""
        if type(message[0]) == list :
            # smells like nested messages
            for msg in message :
                self.dispatch(msg, source)
        elif type(message[0]) == str :
            # got a single message
            try:
                address = message[0]
                callbackfunction = self.callbacks[address]
            except KeyError as e:
                # address not found
                print('address %s not found ' % address)
                pprint.pprint(message)
            except IndexError as e:
                print('got malformed OSC message')
            else:
                try:
                    callbackfunction(message, source)
                except Exception as e:
                    import traceback
                    print('OSC callback %s caused an error: %s' % (address, e))
                    traceback.print_exc()
                    print('---------------------')
                    raise
        else:
            raise ValueError("OSC message not recognized", message)
        return
    def add(self, callback, name):
        """Adds a callback to our set of callbacks,
        or removes the callback with name if callback
        is None."""
        # ``is None`` instead of ``== None``: identity is the correct test.
        if callback is None:
            del self.callbacks[name]
        else:
            self.callbacks[name] = callback
    def unbundler(self, messages, source=None):
        """Dispatch the messages in a decoded bundle."""
        # first two elements are #bundle and the time tag, rest are messages.
        # ``source`` is accepted (and forwarded) because dispatch() invokes
        # every callback as callback(message, source); the old one-argument
        # signature raised TypeError whenever a bundle was handled.
        for message in messages[2:]:
            self.dispatch(message, source)
if __name__ == "__main__":
    # Manual smoke-test / demo of the module: builds messages, round-trips
    # them through the readers, and exercises the callback manager.
    hexDump("Welcome to the OSC testing program.")
    print()
    message = OSCMessage()
    message.setAddress("/foo/play")
    message.append(44)
    message.append(11)
    message.append(4.5)
    message.append("the white cliffs of dover")
    hexDump(message.getBinary())
    print("Making and unmaking a message..")
    strings = OSCMessage()
    strings.append("Mary had a little lamb")
    strings.append("its fleece was white as snow")
    strings.append("and everywhere that Mary went,")
    strings.append("the lamb was sure to go.")
    strings.append(14.5)
    strings.append(14.5)
    strings.append(-400)
    raw = strings.getBinary()
    hexDump(raw)
    print("Retrieving arguments...")
    data = raw
    # First 6 strings: the address, the typetag string, and the 4 lyrics.
    for i in range(6):
        text, data = readString(data)
        print(text)
    number, data = readFloat(data)
    print(number)
    number, data = readFloat(data)
    print(number)
    number, data = readInt(data)
    print(number)
    hexDump(raw)
    print(decodeOSC(raw))
    print(decodeOSC(message.getBinary()))
    print("Testing Blob types.")
    blob = OSCMessage()
    # Blobs of every length mod 4, to exercise the padding logic.
    blob.append("","b")
    blob.append("b","b")
    blob.append("bl","b")
    blob.append("blo","b")
    blob.append("blob","b")
    blob.append("blobs","b")
    blob.append(42)
    hexDump(blob.getBinary())
    print(decodeOSC(blob.getBinary()))
    def printingCallback(*stuff):
        # Simple callback that echoes whatever arguments it receives.
        sys.stdout.write("Got: ")
        for i in stuff:
            sys.stdout.write(str(i) + " ")
        sys.stdout.write("\n")
    print("Testing the callback manager.")
    c = CallbackManager()
    c.add(printingCallback, "/print")
    c.handle(message.getBinary())
    message.setAddress("/print")
    c.handle(message.getBinary())
    print1 = OSCMessage()
    print1.setAddress("/print")
    print1.append("Hey man, that's cool.".encode('utf-8'))
    print1.append(42)
    print1.append(3.1415926)
    c.handle(print1.getBinary())
    # Hand-assemble a bundle: "#bundle", 8-byte time tag (two zero ints),
    # then each sub-message as a length-prefixed blob.
    bundle = OSCMessage()
    bundle.setAddress("")
    bundle.append("#bundle".encode('utf-8'))
    bundle.append(0)
    bundle.append(0)
    bundle.append(print1.getBinary(), 'b')
    bundle.append(print1.getBinary(), 'b')
    bundlebinary = bundle.message
    print("sending a bundle to the callback manager")
    c.handle(bundlebinary)
|
JohnHowland/kivy
|
kivy/lib/osc/OSC.py
|
Python
|
mit
| 11,256
|
[
"VisIt"
] |
1ac5773f4fbaa161b4147b06dbd35136acb59699657eeb23d6f87d4494fd613d
|
from PyQt5.QtCore import QSettings, QStandardPaths
import os, cPickle
import uuid
class FileCache(object):
    """
    A class to help cache objects based on file changes.
    The primary use case was the need to cache Yaml data and action syntax
    from a MOOSE executable.
    This keeps the path of the executable as keys and it keeps the size
    and creation time of the executable. It then creates a cache file
    for that executable with the objects given pickled. If the executable
    changes then the old cache is deleted and the new pickle data is
    added.
    """
    VERSION = 1
    def __init__(self, settings_key, path, version=1):
        """
        Input:
            settings_key[str]: The key in QSettings
            path[str]: The path to check time and size on.
            version[int]: A version number of the stored data. If the format changes this can be bumped and current stored data will be deemed dirty.
        """
        super(FileCache, self).__init__()
        self.dirty = True
        self.path = os.path.abspath(path)
        self.settings_key = settings_key
        self.settings = QSettings()
        self.val = self.settings.value(settings_key, type=dict)
        # Look up by the normalized path: add() stores entries under
        # self.path (the abspath), so looking up the raw argument meant a
        # relative path never found its own cache entry.
        self.path_data = self.val.get(self.path, {})
        self.stat = None
        self.no_exist = False
        self.data_version = version
        self._setDirty()
    def _setDirty(self):
        """
        Sets the dirty flag.
        If the path doesn't exist, or there is no cache data, it will be set as dirty
        """
        try:
            self.stat = os.stat(self.path)
        except OSError:
            # If you can't stat it, then consider it dirty
            self.dirty = True
            self.no_exist = True
            return
        if (not self.path_data
                or self.path_data.get("cache_version") != self.VERSION
                or self.path_data.get("data_version") != self.data_version
                or self.stat.st_ctime != self.path_data.get("ctime")
                or self.stat.st_size != self.path_data.get("size")
                ):
            self.dirty = True
            return
        self.dirty = False
    def read(self):
        """
        Read the stored objects from the cache.
        Return:
            None if the path is not in the cache, else the pickled data
        """
        if self.dirty:
            return None
        try:
            # Binary mode: pickle data is not text; "r" breaks with binary
            # protocols on Python 3 and can corrupt reads on Windows.
            with open(self.path_data["pickle_path"], "rb") as f:
                data = cPickle.load(f)
            return data
        except Exception:
            # Any failure (missing file, truncated/corrupt pickle) is
            # treated as a cache miss.
            return None
    @staticmethod
    def removeCacheFile(path):
        """Delete a cache file, ignoring a missing file or permission error."""
        try:
            os.remove(path)
        except OSError:
            pass
    def _getCacheDir(self):
        """Return the per-user cache directory, creating it if needed."""
        local_data_dir = QStandardPaths.standardLocations(QStandardPaths.CacheLocation)
        path = os.path.abspath(local_data_dir[0])
        try:
            # Apparently the cache location might not exist
            os.makedirs(path)
        except OSError:
            # Already exists (or cannot be created; open() will fail later).
            pass
        return path
    def add(self, obj):
        """
        Add obj to the cache for path
        Input:
            obj: The data to be pickled and cached
        Return:
            False if the obj is already in the cache, else True
        """
        if not self.dirty or self.no_exist:
            # Cache is up to date, no need to add anything
            return False
        if self.path_data:
            self.removeCacheFile(self.path_data["pickle_path"])
        cache_dir = self._getCacheDir()
        filename = uuid.uuid4().hex
        full_path = os.path.join(cache_dir, filename)
        # Binary mode to match read(); HIGHEST_PROTOCOL pickles are binary.
        with open(full_path, "wb") as f:
            cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
        self.path_data = {"ctime": self.stat.st_ctime,
                          "size": self.stat.st_size,
                          "pickle_path": full_path,
                          "cache_version": self.VERSION,
                          "data_version": self.data_version,
                          }
        self.val[self.path] = self.path_data
        self.settings.setValue(self.settings_key, self.val)
        self.dirty = False
        return True
    @staticmethod
    def clearAll(settings_key):
        """
        Clear the cache files and the value in QSettings
        Input:
            settings_key[str]: The key in QSettings
        """
        settings = QSettings()
        stored = settings.value(settings_key, type=dict)
        # The original reused the name "val" as the loop variable,
        # shadowing the dict mid-iteration; only the values are needed.
        for path_data in stored.values():
            FileCache.removeCacheFile(path_data["pickle_path"])
        settings.remove(settings_key)
|
yipenggao/moose
|
python/peacock/utils/FileCache.py
|
Python
|
lgpl-2.1
| 4,532
|
[
"MOOSE"
] |
f86bdb42c9e89f5c73825d14b51d54430e20d409055aa9ebede61721e333d0b2
|
# Orca
#
# Copyright 2016 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for SeaMonkey."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2016 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
from orca import cmdnames
from orca import debug
from orca import input_event
from orca import orca_state
from orca.scripts.toolkits import Gecko
class Script(Gecko.Script):
    """Custom Orca script for SeaMonkey, specializing the Gecko script.

    Most overrides guard against changing modes or presenting events
    while the user is inside an editable message body (mail compose).
    """

    def __init__(self, app):
        super().__init__(app)

    def setupInputEventHandlers(self):
        # Rebind these handlers to this class's overrides so the
        # editable-message guards below are honored.
        super().setupInputEventHandlers()

        self.inputEventHandlers["togglePresentationModeHandler"] = \
            input_event.InputEventHandler(
                Script.togglePresentationMode,
                cmdnames.TOGGLE_PRESENTATION_MODE)

        self.inputEventHandlers["enableStickyFocusModeHandler"] = \
            input_event.InputEventHandler(
                Script.enableStickyFocusMode,
                cmdnames.SET_FOCUS_MODE_STICKY)

        self.inputEventHandlers["enableStickyBrowseModeHandler"] = \
            input_event.InputEventHandler(
                Script.enableStickyBrowseMode,
                cmdnames.SET_BROWSE_MODE_STICKY)

    def onBusyChanged(self, event):
        """Callback for object:state-changed:busy accessibility events."""

        # Ignore busy-state changes originating in editable content
        # (e.g. the message being composed).
        if self.utilities.isContentEditableWithEmbeddedObjects(event.source):
            msg = "SEAMONKEY: Ignoring, event source is content editable"
            debug.println(debug.LEVEL_INFO, msg, True)
            return

        # Ignore changes while focus is inside a table that is not part
        # of a text document (presumably a layout/UI table).
        table = self.utilities.getTable(orca_state.locusOfFocus)
        if table and not self.utilities.isTextDocumentTable(table):
            msg = "SEAMONKEY: Ignoring, locusOfFocus is %s" % orca_state.locusOfFocus
            debug.println(debug.LEVEL_INFO, msg, True)
            return

        super().onBusyChanged(event)

    def onFocus(self, event):
        """Callback for focus: accessibility events."""

        # We should get proper state-changed events for these.
        if self.utilities.inDocumentContent(event.source):
            return

        try:
            focusRole = orca_state.locusOfFocus.getRole()
        except:
            msg = "ERROR: Exception getting role for %s" % orca_state.locusOfFocus
            debug.println(debug.LEVEL_INFO, msg, True)
            focusRole = None

        # Only the "entry inside document content" case needs the special
        # handling below; everything else defers to the Gecko script.
        if focusRole != pyatspi.ROLE_ENTRY or not self.utilities.inDocumentContent():
            super().onFocus(event)
            return

        # A non-document menu claiming focus right after printable input
        # into a document entry is treated as a side effect of typing
        # (e.g. an autocomplete popup) and not presented.
        if event.source.getRole() == pyatspi.ROLE_MENU:
            msg = "SEAMONKEY: Non-document menu claimed focus from document entry"
            debug.println(debug.LEVEL_INFO, msg, True)
            if self.utilities.lastInputEventWasPrintableKey():
                msg = "SEAMONKEY: Ignoring, believed to be result of printable input"
                debug.println(debug.LEVEL_INFO, msg, True)
                return

        super().onFocus(event)

    def useFocusMode(self, obj, prevObj=None):
        # Always use focus mode inside an editable message body.
        if self.utilities.isEditableMessage(obj):
            msg = "SEAMONKEY: Using focus mode for editable message %s" % obj
            debug.println(debug.LEVEL_INFO, msg, True)
            return True

        msg = "SEAMONKEY: %s is not an editable message." % obj
        debug.println(debug.LEVEL_INFO, msg, True)
        return super().useFocusMode(obj, prevObj)

    def enableStickyBrowseMode(self, inputEvent, forceMessage=False):
        # No mode switching while composing a message.
        if self.utilities.isEditableMessage(orca_state.locusOfFocus):
            return

        super().enableStickyBrowseMode(inputEvent, forceMessage)

    def enableStickyFocusMode(self, inputEvent, forceMessage=False):
        # No mode switching while composing a message.
        if self.utilities.isEditableMessage(orca_state.locusOfFocus):
            return

        super().enableStickyFocusMode(inputEvent, forceMessage)

    def togglePresentationMode(self, inputEvent, documentFrame=None):
        # Don't allow leaving focus mode while composing a message.
        if self._inFocusMode \
           and self.utilities.isEditableMessage(orca_state.locusOfFocus):
            return

        super().togglePresentationMode(inputEvent, documentFrame)

    def useStructuralNavigationModel(self):
        # Structural navigation is disabled inside an editable message.
        if self.utilities.isEditableMessage(orca_state.locusOfFocus):
            return False

        return super().useStructuralNavigationModel()
|
GNOME/orca
|
src/orca/scripts/apps/SeaMonkey/script.py
|
Python
|
lgpl-2.1
| 5,046
|
[
"ORCA"
] |
01f41de894266e8a81082b4fe5109626cf03c4dfba5cd86e816091855300c485
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Generalized Hartree-Fock for periodic systems at a single k-point
'''
import numpy as np
import scipy.linalg
import pyscf.scf.ghf as mol_ghf
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.scf import hf as pbchf
from pyscf.pbc.scf import addons
from pyscf.pbc.scf import chkfile
def get_jk(mf, cell=None, dm=None, hermi=0, kpt=None, kpts_band=None,
           with_j=True, with_k=True, **kwargs):
    '''Build the Coulomb (J) and exchange (K) matrices for spin-orbital
    (GHF) density matrices at a single k-point.

    Each (nso, nso) spin-orbital density matrix (nso = 2*nao) is split
    into its spatial spin blocks, J/K are built for the (nao, nao)
    blocks through mf.with_df, and the results are reassembled into
    spin-orbital matrices.

    Returns:
        (vj, vk), each with the same shape as dm; an entry is None when
        the corresponding with_j/with_k flag is False.
    '''
    if cell is None: cell = mf.cell
    if dm is None: dm = mf.make_rdm1()
    if kpt is None: kpt = mf.kpt

    dm = np.asarray(dm)
    nso = dm.shape[-1]
    nao = nso // 2
    # Flatten any leading dimensions so all dms are handled in one pass.
    dms = dm.reshape(-1,nso,nso)
    n_dm = dms.shape[0]
    # Spatial spin blocks of each density matrix: upper-left (aa),
    # lower-right (bb) and the lower-left off-diagonal block.
    dmaa = dms[:,:nao,:nao]
    dmab = dms[:,nao:,:nao]
    dmbb = dms[:,nao:,nao:]
    # Stack as (3*n_dm, nao, nao) so a single with_df.get_jk call
    # processes all blocks; the reshape below undoes the stacking.
    dms = np.vstack((dmaa, dmbb, dmab))
    j1, k1 = mf.with_df.get_jk(dms, hermi, kpt, kpts_band, with_j, with_k,
                               exxdiv=mf.exxdiv)
    j1 = j1.reshape(3,n_dm,nao,nao)
    k1 = k1.reshape(3,n_dm,nao,nao)

    vj = vk = None
    if with_j:
        # Coulomb: both spin-diagonal blocks receive the total potential
        # from the aa and bb densities; no spin off-diagonal J term.
        vj = np.zeros((n_dm,nso,nso), j1.dtype)
        vj[:,:nao,:nao] = vj[:,nao:,nao:] = j1[0] + j1[1]
        vj = vj.reshape(dm.shape)

    if with_k:
        # Exchange: spin-diagonal blocks from the same-spin densities;
        # the two off-diagonal blocks are built from the off-diagonal
        # density, one as the conjugate transpose of the other.
        vk = np.zeros((n_dm,nso,nso), k1.dtype)
        vk[:,:nao,:nao] = k1[0]
        vk[:,nao:,nao:] = k1[1]
        vk[:,:nao,nao:] = k1[2]
        vk[:,nao:,:nao] = k1[2].transpose(0,2,1).conj()
        vk = vk.reshape(dm.shape)

    return vj, vk
class GHF(pbchf.SCF, mol_ghf.GHF):
    '''GHF class for PBCs at a single k-point.

    Spin-orbital (2*nao x 2*nao) one-electron matrices are assembled as
    block-diagonal copies of the spatial matrices from pbchf.SCF.
    '''
    def get_hcore(self, cell=None, kpt=None):
        # Same spatial core Hamiltonian on the alpha and beta blocks.
        hcore = pbchf.SCF.get_hcore(self, cell, kpt)
        return scipy.linalg.block_diag(hcore, hcore)

    def get_ovlp(self, cell=None, kpt=None):
        # Spin-orbital overlap: block-diagonal in spin.
        s = pbchf.SCF.get_ovlp(self, cell, kpt)
        return scipy.linalg.block_diag(s, s)

    get_jk = get_jk
    get_occ = mol_ghf.get_occ
    get_grad = mol_ghf.GHF.get_grad

    def get_j(self, cell=None, dm=None, hermi=0, kpt=None, kpts_band=None,
              **kwargs):
        '''Coulomb matrix only (J).'''
        # Bug fix: the first parameter was named "mf" while the body used
        # "self", so every call raised NameError.
        return self.get_jk(cell, dm, hermi, kpt, kpts_band, True, False)[0]

    def get_k(self, cell=None, dm=None, hermi=0, kpt=None, kpts_band=None,
              **kwargs):
        '''Exchange matrix only (K).'''
        return self.get_jk(cell, dm, hermi, kpt, kpts_band, False, True)[1]

    def get_veff(self, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
                 kpt=None, kpts_band=None):
        '''Hartree-Fock effective potential vj - vk (no exchange scaling).'''
        vj, vk = self.get_jk(cell, dm, hermi, kpt, kpts_band, True, True)
        vhf = vj - vk
        return vhf

    def get_bands(self, kpts_band, cell=None, dm=None, kpt=None):
        '''Get energy bands at the given (arbitrary) 'band' k-points.

        Returns:
            mo_energy : (nmo,) ndarray or a list of (nmo,) ndarray
                Bands energies E_n(k)
            mo_coeff : (nao, nmo) ndarray or a list of (nao,nmo) ndarray
                Band orbitals psi_n(k)
        '''
        raise NotImplementedError

    def get_init_guess(self, cell=None, key='minao'):
        # Molecular GHF guess, renormalized to the cell's electron count.
        if cell is None: cell = self.cell
        dm = mol_ghf.GHF.get_init_guess(self, cell, key)
        dm = pbchf.normalize_dm_(self, dm)
        return dm

    def convert_from_(self, mf):
        '''Convert the given mean-field object to a k-point GHF object.'''
        # (Docstring fix: this converts *to* GHF, not RHF/ROHF.)
        addons.convert_to_ghf(mf, self)
        return self

    # Not implemented for PBC GHF.
    stability = None
    nuc_grad_method = None
if __name__ == '__main__':
    # Smoke test: 4 hydrogen atoms in a small cubic cell with a minimal
    # single-s basis, run at one k-point of a 2x2x2 mesh.
    # NOTE(review): this import shadows the module-level `addons` import
    # above and appears unused here -- confirm it is intentional.
    from pyscf.scf import addons
    from pyscf.pbc import gto
    from pyscf.pbc import scf

    cell = gto.Cell()
    cell.atom = '''
    H 0 0 0
    H 1 0 0
    H 0 1 0
    H 0 1 1
    '''
    cell.a = np.eye(3)*2
    cell.basis = [[0, [1.2, 1]]]
    cell.verbose = 4
    cell.build()

    kpts = cell.make_kpts([2,2,2])
    # RHF first at the same k-point, then the GHF defined above.
    mf = scf.RHF(cell, kpt=kpts[7]).run()
    mf = GHF(cell, kpt=kpts[7])
    mf.kernel()
|
gkc1000/pyscf
|
pyscf/pbc/scf/ghf.py
|
Python
|
apache-2.0
| 4,417
|
[
"PySCF"
] |
7880bc7e49b7a5d711707557d81fda015b73e2a9f01c3f40d520ef939976a5d6
|
import glob
import pandas as pd
import numpy as np
# Display up to 50 columns when printing frames.  (The original comment
# said "print all rows", but this option controls displayed *columns*.)
pd.set_option('display.max_columns', 50)
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")

# Collect the per-sample binary methylation-call files for each cell group.
# NOTE(review): glob.glob() returns files in arbitrary filesystem order;
# the hard-coded column-name list assigned further below assumes a
# specific ordering of these files -- confirm, or sort each list.
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))

df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    # Drop the unnamed index column written by an earlier to_csv().
    df = df.drop("Unnamed: 0", axis=1)
    # Chromosome label = first 5 characters of the position string
    # (e.g. "chr11_..." -> "chr11"); keep only chr11 rows.
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr11"]
    df = df.drop("chromosome", axis=1)
    df_list.append(df)
print(len(df_list))

# Outer-join all samples on genomic position: one column per sample file.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
# NOTE(review): this expects reset_index() to produce a column literally
# named "index"; if the shared index keeps its "position" name this drop
# raises KeyError -- confirm with the pandas version in use.
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
# Encode each methylation call as an int ("0"/"1") and missing values as
# "?" (PHYLIP-style missing-data character).
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse each column (one sample) into a single character string.
total_matrix = total_matrix.astype(str).apply(''.join)
# Build "name<space>sequence" lines: sample name followed by its string.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("total_chrom11.phy", header=None, index=None)
print(tott.shape)
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/total_chr11.py
|
Python
|
mit
| 32,998
|
[
"MCell"
] |
32cae4977a5a4321ac91b39115998b35c1664e776ee7f7a5a4a345de2eebc266
|
#
# @file TestRDFAnnotationC.py
# @brief RDFAnnotation parser unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestRDFAnnotationC.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestRDFAnnotationC(unittest.TestCase):
  """Unit tests for libsbml's RDFAnnotationParser bindings.

  This file is auto-generated from src/annotation/test/TestRDFAnnotationC.c
  (see the warning in the file header) -- edits here will be overwritten
  the next time the file is regenerated.
  """
  global d
  d = None
  global m
  m = None
  def setUp(self):
    # Load the shared annotation fixture and cache the document and model.
    filename = "../../sbml/annotation/test/test-data/annotation.xml"
    self.d = libsbml.readSBML(filename)
    self.m = self.d.getModel()
    pass
  def tearDown(self):
    # Drop the document reference so the wrapped C++ object can be reclaimed.
    _dummyList = [ self.d ]; _dummyList[:] = []; del _dummyList
    pass
  def test_RDFAnnotation_C_delete(self):
    # deleteRDFAnnotation() should strip the RDF and leave an empty
    # <annotation> element behind.
    obj = self.m.getCompartment(0)
    node = libsbml.RDFAnnotationParser.parseCVTerms(obj)
    n1 = libsbml.RDFAnnotationParser.deleteRDFAnnotation(node)
    self.assert_( n1.getNumChildren() == 0 )
    self.assert_(( "annotation" == n1.getName() ))
    _dummyList = [ node ]; _dummyList[:] = []; del _dummyList
    pass
  def test_RDFAnnotation_C_getModelHistory(self):
    # The fixture's model history carries one creator plus created/modified
    # dates; check every parsed field.
    self.assert_( (self.m == None) == False )
    history = self.m.getModelHistory()
    self.assert_( history != None )
    mc = history.getCreator(0)
    self.assert_(( "Le Novere" == mc.getFamilyName() ))
    self.assert_(( "Nicolas" == mc.getGivenName() ))
    self.assert_(( "lenov@ebi.ac.uk" == mc.getEmail() ))
    self.assert_(( "EMBL-EBI" == mc.getOrganisation() ))
    date = history.getCreatedDate()
    self.assert_( date.getYear() == 2005 )
    self.assert_( date.getMonth() == 2 )
    self.assert_( date.getDay() == 2 )
    self.assert_( date.getHour() == 14 )
    self.assert_( date.getMinute() == 56 )
    self.assert_( date.getSecond() == 11 )
    self.assert_( date.getSignOffset() == 0 )
    self.assert_( date.getHoursOffset() == 0 )
    self.assert_( date.getMinutesOffset() == 0 )
    self.assert_(( "2005-02-02T14:56:11Z" == date.getDateAsString() ))
    date = history.getModifiedDate()
    self.assert_( date.getYear() == 2006 )
    self.assert_( date.getMonth() == 5 )
    self.assert_( date.getDay() == 30 )
    self.assert_( date.getHour() == 10 )
    self.assert_( date.getMinute() == 46 )
    self.assert_( date.getSecond() == 2 )
    self.assert_( date.getSignOffset() == 0 )
    self.assert_( date.getHoursOffset() == 0 )
    self.assert_( date.getMinutesOffset() == 0 )
    self.assert_(( "2006-05-30T10:46:02Z" == date.getDateAsString() ))
    pass
  def test_RDFAnnotation_C_parseCVTerms(self):
    # parseCVTerms() should build annotation/RDF/Description/is/Bag with
    # four rdf:li children for the fixture's first compartment.
    obj = self.m.getCompartment(0)
    node = libsbml.RDFAnnotationParser.parseCVTerms(obj)
    self.assert_( node.getNumChildren() == 1 )
    rdf = node.getChild(0)
    self.assert_(( "RDF" == rdf.getName() ))
    self.assert_(( "rdf" == rdf.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == rdf.getURI() ))
    self.assert_( rdf.getNumChildren() == 1 )
    desc = rdf.getChild(0)
    self.assert_(( "Description" == desc.getName() ))
    self.assert_(( "rdf" == desc.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == desc.getURI() ))
    self.assert_( desc.getNumChildren() == 1 )
    is1 = desc.getChild(0)
    self.assert_(( "is" == is1.getName() ))
    self.assert_(( "bqbiol" == is1.getPrefix() ))
    self.assert_( is1.getNumChildren() == 1 )
    Bag = is1.getChild(0)
    self.assert_(( "Bag" == Bag.getName() ))
    self.assert_(( "rdf" == Bag.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == Bag.getURI() ))
    self.assert_( Bag.getNumChildren() == 4 )
    li = Bag.getChild(0)
    self.assert_(( "li" == li.getName() ))
    self.assert_(( "rdf" == li.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li.getURI() ))
    self.assert_( li.getNumChildren() == 0 )
    li1 = Bag.getChild(1)
    self.assert_(( "li" == li1.getName() ))
    self.assert_(( "rdf" == li1.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li1.getURI() ))
    self.assert_( li1.getNumChildren() == 0 )
    li2 = Bag.getChild(2)
    self.assert_(( "li" == li2.getName() ))
    self.assert_(( "rdf" == li2.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li2.getURI() ))
    self.assert_( li2.getNumChildren() == 0 )
    li3 = Bag.getChild(3)
    self.assert_(( "li" == li3.getName() ))
    self.assert_(( "rdf" == li3.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li3.getURI() ))
    self.assert_( li3.getNumChildren() == 0 )
    _dummyList = [ node ]; _dummyList[:] = []; del _dummyList
    pass
  def test_RDFAnnotation_C_parseModelHistory(self):
    # parseModelHistory() should yield a Description with three children:
    # dc:creator (vCard), dcterms:created and dcterms:modified.
    node = libsbml.RDFAnnotationParser.parseModelHistory(self.m)
    self.assert_( node.getNumChildren() == 1 )
    rdf = node.getChild(0)
    self.assert_(( "RDF" == rdf.getName() ))
    self.assert_(( "rdf" == rdf.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == rdf.getURI() ))
    self.assert_( rdf.getNumChildren() == 1 )
    desc = rdf.getChild(0)
    self.assert_(( "Description" == desc.getName() ))
    self.assert_(( "rdf" == desc.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == desc.getURI() ))
    self.assert_( desc.getNumChildren() == 3 )
    creator = desc.getChild(0)
    self.assert_(( "creator" == creator.getName() ))
    self.assert_(( "dc" == creator.getPrefix() ))
    self.assert_(( "http://purl.org/dc/elements/1.1/" == creator.getURI() ))
    self.assert_( creator.getNumChildren() == 1 )
    Bag = creator.getChild(0)
    self.assert_(( "Bag" == Bag.getName() ))
    self.assert_(( "rdf" == Bag.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == Bag.getURI() ))
    self.assert_( Bag.getNumChildren() == 1 )
    li = Bag.getChild(0)
    self.assert_(( "li" == li.getName() ))
    self.assert_(( "rdf" == li.getPrefix() ))
    self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li.getURI() ))
    self.assert_( li.getNumChildren() == 3 )
    N = li.getChild(0)
    self.assert_(( "N" == N.getName() ))
    self.assert_(( "vCard" == N.getPrefix() ))
    self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == N.getURI() ))
    self.assert_( N.getNumChildren() == 2 )
    Family = N.getChild(0)
    self.assert_(( "Family" == Family.getName() ))
    self.assert_(( "vCard" == Family.getPrefix() ))
    self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == Family.getURI() ))
    self.assert_( Family.getNumChildren() == 1 )
    Given = N.getChild(1)
    self.assert_(( "Given" == Given.getName() ))
    self.assert_(( "vCard" == Given.getPrefix() ))
    self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == Given.getURI() ))
    self.assert_( Given.getNumChildren() == 1 )
    EMAIL = li.getChild(1)
    self.assert_(( "EMAIL" == EMAIL.getName() ))
    self.assert_(( "vCard" == EMAIL.getPrefix() ))
    self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == EMAIL.getURI() ))
    self.assert_( EMAIL.getNumChildren() == 1 )
    ORG = li.getChild(2)
    self.assert_(( "ORG" == ORG.getName() ))
    self.assert_(( "vCard" == ORG.getPrefix() ))
    self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == ORG.getURI() ))
    self.assert_( ORG.getNumChildren() == 1 )
    Orgname = ORG.getChild(0)
    self.assert_(( "Orgname" == Orgname.getName() ))
    self.assert_(( "vCard" == Orgname.getPrefix() ))
    self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == Orgname.getURI() ))
    self.assert_( Orgname.getNumChildren() == 1 )
    created = desc.getChild(1)
    self.assert_(( "created" == created.getName() ))
    self.assert_(( "dcterms" == created.getPrefix() ))
    self.assert_(( "http://purl.org/dc/terms/" == created.getURI() ))
    self.assert_( created.getNumChildren() == 1 )
    cr_date = created.getChild(0)
    self.assert_(( "W3CDTF" == cr_date.getName() ))
    self.assert_(( "dcterms" == cr_date.getPrefix() ))
    self.assert_(( "http://purl.org/dc/terms/" == cr_date.getURI() ))
    self.assert_( cr_date.getNumChildren() == 1 )
    modified = desc.getChild(2)
    self.assert_(( "modified" == modified.getName() ))
    self.assert_(( "dcterms" == modified.getPrefix() ))
    self.assert_(( "http://purl.org/dc/terms/" == modified.getURI() ))
    self.assert_( modified.getNumChildren() == 1 )
    # NOTE(review): this reads created.getChild(0) again rather than
    # modified.getChild(0); likely a copy bug inherited from the C original
    # -- confirm against src/annotation/test/TestRDFAnnotationC.c before
    # changing (the file is auto-generated).
    mo_date = created.getChild(0)
    self.assert_(( "W3CDTF" == mo_date.getName() ))
    self.assert_(( "dcterms" == mo_date.getPrefix() ))
    self.assert_(( "http://purl.org/dc/terms/" == mo_date.getURI() ))
    self.assert_( mo_date.getNumChildren() == 1 )
    _dummyList = [ node ]; _dummyList[:] = []; del _dummyList
    pass
def suite():
  """Build and return the TestSuite holding all TestRDFAnnotationC cases."""
  tests = unittest.TestSuite()
  tests.addTest(unittest.makeSuite(TestRDFAnnotationC))
  return tests
if __name__ == "__main__":
  # Exit status mirrors the overall test result for the build harness:
  # 0 on success, 1 on any failure/error.
  result = unittest.TextTestRunner(verbosity=1).run(suite())
  sys.exit(0 if result.wasSuccessful() else 1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/annotation/TestRDFAnnotationC.py
|
Python
|
bsd-3-clause
| 10,162
|
[
"VisIt"
] |
dd0ff51a404d0e3398ddefe12f6136cf573e0aa1d0fa996490f7e847249f7ac9
|
""" Chemical Signalling model loaded into moose can be save into Genesis-Kkit format """
__author__ = "Harsha Rani"
__copyright__ = "Copyright 2015, Harsha Rani and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Harsha Rani"
__email__ = "hrani@ncbs.res.in"
__status__ = "Development"
import sys
import random
from moose import wildcardFind, element, loadModel, ChemCompt, exists, Annotator, Pool, ZombiePool,PoolBase,CplxEnzBase,Function,ZombieFunction
import numpy as np
import re
GENESIS_COLOR_SEQUENCE = ((248, 0, 255), (240, 0, 255), (232, 0, 255), (224, 0, 255), (216, 0, 255), (208, 0, 255),
(200, 0, 255), (192, 0, 255), (184, 0, 255), (176, 0, 255), (168, 0, 255), (160, 0, 255), (152, 0, 255), (144, 0, 255),
(136, 0, 255), (128, 0, 255), (120, 0, 255), (112, 0, 255), (104, 0, 255), (96, 0, 255), (88, 0, 255), (80, 0, 255),
(72, 0, 255), (64, 0, 255), (56, 0, 255), (48, 0, 255), (40, 0, 255), (32, 0, 255), (24, 0, 255), (16, 0, 255),
(8, 0, 255), (0, 0, 255), (0, 8, 248), (0, 16, 240), (0, 24, 232), (0, 32, 224), (0, 40, 216), (0, 48, 208),
(0, 56, 200), (0, 64, 192), (0, 72, 184), (0, 80, 176), (0, 88, 168), (0, 96, 160), (0, 104, 152), (0, 112, 144),
(0, 120, 136), (0, 128, 128), (0, 136, 120), (0, 144, 112), (0, 152, 104), (0, 160, 96), (0, 168, 88), (0, 176, 80),
(0, 184, 72), (0, 192, 64), (0, 200, 56), (0, 208, 48), (0, 216, 40), (0, 224, 32), (0, 232, 24), (0, 240, 16), (0, 248, 8),
(0, 255, 0), (8, 255, 0), (16, 255, 0), (24, 255, 0), (32, 255, 0), (40, 255, 0), (48, 255, 0), (56, 255, 0), (64, 255, 0),
(72, 255, 0), (80, 255, 0), (88, 255, 0), (96, 255, 0), (104, 255, 0), (112, 255, 0), (120, 255, 0), (128, 255, 0),
(136, 255, 0), (144, 255, 0), (152, 255, 0), (160, 255, 0), (168, 255, 0), (176, 255, 0), (184, 255, 0), (192, 255, 0),
(200, 255, 0), (208, 255, 0), (216, 255, 0), (224, 255, 0), (232, 255, 0), (240, 255, 0), (248, 255, 0), (255, 255, 0),
(255, 248, 0), (255, 240, 0), (255, 232, 0), (255, 224, 0), (255, 216, 0), (255, 208, 0), (255, 200, 0), (255, 192, 0),
(255, 184, 0), (255, 176, 0), (255, 168, 0), (255, 160, 0), (255, 152, 0), (255, 144, 0), (255, 136, 0), (255, 128, 0),
(255, 120, 0), (255, 112, 0), (255, 104, 0), (255, 96, 0), (255, 88, 0), (255, 80, 0), (255, 72, 0), (255, 64, 0),
(255, 56, 0), (255, 48, 0), (255, 40, 0), (255, 32, 0), (255, 24, 0), (255, 16, 0), (255, 8, 0), (255, 0, 0))
#Todo : To be written
# --Notes
# --StimulusTable
def write( modelpath, filename,sceneitems=None):
    """Save the chemical model under `modelpath` in GENESIS/kkit (.g) format.

    Args:
        modelpath: moose path of the model root (e.g. '/mymodel').
        filename: output file name; any existing extension is replaced
            with '.g'.
        sceneitems: optional {element: QGraphicsItem} map supplied by the
            GUI; when None, coordinates are read from each element's info
            Annotator instead (see getCor()).

    Returns:
        True when the model was written, False when no ChemCompt was found
        under modelpath.
    """
    # Normalise the output name so it always ends in '.g'.
    if filename.rfind('.') != -1:
        filename = filename[:filename.rfind('.')]
    filename = filename+'.g'
    global NA
    NA = 6.0221415e23  # Avogadro's number (volume -> molecule-count scaling)
    global xmin,xmax,ymin,ymax
    global cord
    global multi
    xmin = ymin = 0
    xmax = ymax = 1
    multi = 50  # scale factor from normalised coordinates to the kkit canvas
    cord = {}
    compt = wildcardFind(modelpath+'/##[ISA=ChemCompt]')
    maxVol = estimateDefaultVol(compt)
    f = open(filename, 'w')
    try:
        writeHeader (f,maxVol)
        # BUG FIX: the original tested `compt > 0`, comparing a list with an
        # int, which raises TypeError on Python 3; test for non-emptiness.
        if len(compt) > 0:
            if sceneitems is None:
                # Loaded from a script: check whether x,y coordinates exist
                # in the Annotators (SBML/cspace/python models may not
                # populate them, in which case positionInfoExist is False).
                xmin,ymin,xmax,ymax,positionInfoExist = getCor(modelpath,sceneitems)
                if not positionInfoExist:
                    print(" auto co-ordinates needs to be applied")
            else:
                # Coming from the GUI: objects are already laid out on the
                # scene, so use those co-ordinates.
                xmin,ymin,xmax,ymax,positionInfoExist = getCor(modelpath,sceneitems)
            gtId_vol = writeCompartment(modelpath,compt,f)
            writePool(modelpath,f,gtId_vol)
            reacList = writeReac(modelpath,f)
            enzList = writeEnz(modelpath,f)
            writeSumtotal(modelpath,f)
            storeReacMsg(reacList,f)
            storeEnzMsg(enzList,f)
            writeGui(f)
            tgraphs = wildcardFind(modelpath+'/##[ISA=Table2]')
            if tgraphs:
                writeplot(tgraphs,f)
                storePlotMsgs(tgraphs,f)
            writeFooter1(f)
            writeNotes(modelpath,f)
            writeFooter2(f)
            return True
        else:
            print("Warning: writeKkit:: No model found on " , modelpath)
            return False
    finally:
        # BUG FIX: the original never closed the output file handle.
        f.close()
def storeCplxEnzMsgs( enz, f ):
    """Write the kkit addmsg lines wiring an explicit-complex enzyme to its
    substrates, products and parent enzyme pool."""
    enz_path = "/kinetics/" + trimPath(enz)
    for sub in enz.neighbors["subOut"]:
        sub_path = "/kinetics/" + trimPath(sub)
        f.write("addmsg " + sub_path + " " + enz_path + " SUBSTRATE n \n" +
                "addmsg " + enz_path + " " + sub_path + " REAC sA B \n")
    for prd in enz.neighbors["prd"]:
        f.write("addmsg " + enz_path + " /kinetics/" + trimPath(prd) + " MM_PRD pA\n")
    for enzOut in enz.neighbors["enzOut"]:
        pool_path = "/kinetics/" + trimPath(enzOut)
        f.write("addmsg " + pool_path + " " + enz_path + " ENZYME n\n" +
                "addmsg " + enz_path + " " + pool_path + " REAC eA B\n")
def storeMMenzMsgs( enz, f):
    """Write the kkit addmsg lines wiring a Michaelis-Menten enzyme to its
    substrates, products and the pool supplying the enzyme concentration."""
    enz_path = "/kinetics/" + trimPath(enz)
    for esub in enz.neighbors["subOut"]:
        sub_path = "/kinetics/" + trimPath(element(esub))
        f.write("addmsg " + sub_path + " " + enz_path + " SUBSTRATE n \n" +
                "addmsg " + enz_path + " " + sub_path + " REAC sA B \n")
    for eprd in enz.neighbors["prd"]:
        f.write("addmsg " + enz_path + " /kinetics/" + trimPath(element(eprd)) + " MM_PRD pA \n")
    for eenzDest in enz.neighbors["enzDest"]:
        f.write("addmsg /kinetics/" + trimPath(element(eenzDest)) + " " + enz_path + " ENZYME n \n")
def storeEnzMsg( enzList, f):
    """Dispatch each enzyme to the message writer matching its moose class:
    Michaelis-Menten enzymes vs explicit-complex enzymes."""
    for enz in enzList:
        if enz.className in ("ZombieMMenz", "MMenz"):
            storeMMenzMsgs(enz, f)
        else:
            storeCplxEnzMsgs( enz, f )
def writeEnz( modelpath,f):
    """Write a kkit 'simundump kenz' entry for every enzyme under modelpath.

    Returns the list of enzyme elements so the caller can wire up their
    messages afterwards (see storeEnzMsg), or False on error.
    """
    enzList = wildcardFind(modelpath+'/##[ISA=EnzBase]')
    for enz in enzList:
        # Fallback random placement; overwritten from the cord table below.
        x = random.randrange(0,10)
        y = random.randrange(0,10)
        textcolor = "green"
        color = "red"
        k1 = 0;
        k2 = 0;
        k3 = 0;
        nInit = 0;
        concInit = 0;
        n = 0;
        conc = 0;
        enzParent = enz.parent
        # NOTE(review): className is a string, so isinstance(className, Pool)
        # is always False and this error path can never fire -- the intended
        # check was presumably on the parent element's type. Confirm before
        # relying on this guard.
        if (isinstance(enzParent.className,Pool)) or (isinstance(enzParent.className,ZombiePool)):
            print(" raise exception enz doesn't have pool as parent")
            return False
        else:
            # Parent pool volume converted to a molecule-count scale (litres
            # via the 1e-3 factor, times Avogadro's number).
            vol = enzParent.volume * NA * 1e-3;
            isMichaelisMenten = 0;
            enzClass = enz.className
            if (enzClass == "ZombieMMenz" or enzClass == "MMenz"):
                # Derive explicit k1/k2/k3 from Km and kcat using the kkit
                # convention k2 = 4*kcat.
                k1 = enz.numKm
                k3 = enz.kcat
                k2 = 4.0*k3;
                k1 = (k2 + k3) / k1;
                isMichaelisMenten = 1;
            elif (enzClass == "ZombieEnz" or enzClass == "Enz"):
                k1 = enz.k1
                k2 = enz.k2
                k3 = enz.k3
                # Initial count of the enzyme-substrate complex.
                cplx = enz.neighbors['cplx'][0]
                nInit = cplx.nInit[0];
            # Map stored coordinates into the kkit canvas (0..multi).
            xe = cord[enz]['x']
            ye = cord[enz]['y']
            x = ((xe-xmin)/(xmax-xmin))*multi
            y = ((ye-ymin)/(ymax-ymin))*multi
            #y = ((ymax-ye)/(ymax-ymin))*multi
            einfo = enz.path+'/info'
            if exists(einfo):
                color = Annotator(einfo).getField('color')
                color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
                textcolor = Annotator(einfo).getField('textColor')
                textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
            f.write("simundump kenz /kinetics/" + trimPath(enz) + " " + str(0)+ " " +
                    str(concInit) + " " +
                    str(conc) + " " +
                    str(nInit) + " " +
                    str(n) + " " +
                    str(vol) + " " +
                    str(k1) + " " +
                    str(k2) + " " +
                    str(k3) + " " +
                    str(0) + " " +
                    str(isMichaelisMenten) + " " +
                    "\"\"" + " " +
                    str(color) + " " + str(textcolor) + " \"\"" +
                    " " + str(int(x)) + " " + str(int(y)) + " "+str(0)+"\n")
    return enzList
def nearestColorIndex(color, color_sequence):
    """Return the index of the entry in color_sequence closest to color,
    using squared Euclidean distance in RGB space (ties keep the first
    match, matching the Genesis rainbow colormap lookup)."""
    best = 0
    best_dist = None
    for idx, (r, g, b) in enumerate(color_sequence):
        dist = (color[0] - r) ** 2 + (color[1] - g) ** 2 + (color[2] - b) ** 2
        if best_dist is None or dist < best_dist:
            best = idx
            best_dist = dist
    return best
def storeReacMsg(reacList,f):
    """Write the kkit addmsg lines wiring each reaction to its substrates
    and products (forward and reverse messages)."""
    for reac in reacList:
        reac_path = "/kinetics/" + trimPath( reac)
        for sub in reac.neighbors["subOut"]:
            sub_path = "/kinetics/" + trimPath( sub )
            f.write("addmsg " + sub_path + " " + reac_path + " SUBSTRATE n \n" +
                    "addmsg " + reac_path + " " + sub_path + " REAC A B \n")
        for prd in reac.neighbors["prd"]:
            prd_path = "/kinetics/" + trimPath( prd )
            f.write("addmsg " + prd_path + " " + reac_path + " PRODUCT n \n" +
                    "addmsg " + reac_path + " " + prd_path + " REAC B A\n")
def writeReac(modelpath,f):
    """Write a kkit 'simundump kreac' entry for every reaction under
    modelpath and return the list of reaction elements (used later by
    storeReacMsg)."""
    reacList = wildcardFind(modelpath+'/##[ISA=ReacBase]')
    for reac in reacList :
        # Defaults used when the reaction has no info annotation.
        color = "blue"
        textcolor = "red"
        kf = reac.numKf
        kb = reac.numKb
        # Map stored coordinates into the kkit canvas (0..multi).
        xr = cord[reac]['x']
        yr = cord[reac]['y']
        x = ((xr-xmin)/(xmax-xmin))*multi
        y = ((yr-ymin)/(ymax-ymin))*multi
        #y = ((ymax-yr)/(ymax-ymin))*multi
        rinfo = reac.path+'/info'
        if exists(rinfo):
            color = Annotator(rinfo).getField('color')
            color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
            textcolor = Annotator(rinfo).getField('textColor')
            textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
        f.write("simundump kreac /kinetics/" + trimPath(reac) + " " +str(0) +" "+ str(kf) + " " + str(kb) + " \"\" " +
                str(color) + " " + str(textcolor) + " " + str(int(x)) + " " + str(int(y)) + " 0\n")
    return reacList
def trimPath(mobj):
    """Return mobj's path relative to its enclosing ChemCompt, in a
    GENESIS-friendly form: vector indices '[n]' stripped and '_dash_'
    replaced by '-'.

    Returns None (after printing a warning) when no ChemCompt ancestor is
    found. NOTE(review): when mobj itself is a ChemCompt, `found` stays
    False and the function also falls through returning None -- confirm
    callers expect that.
    """
    original = mobj
    mobj = element(mobj)
    found = False
    # Walk up the parent chain until we hit the enclosing compartment
    # (or fall off the root).
    while not isinstance(mobj,ChemCompt) and mobj.path != "/":
        mobj = element(mobj.parent)
        found = True
    if mobj.path == "/":
        print("compartment is not found with the given path and the path has reached root ",original)
        return
    #other than the kinetics compartment, all the othername are converted to group in Genesis which are place under /kinetics
    # Any moose object comes under /kinetics then one level down the path is taken.
    # e.g /group/poolObject or /Reac
    if found:
        if mobj.name != "kinetics":
            # Keep the compartment/group name as part of the relative path.
            splitpath = original.path[(original.path.find(mobj.name)):len(original.path)]
        else:
            # Inside /kinetics itself: drop the compartment prefix entirely.
            pos = original.path.find(mobj.name)
            slash = original.path.find('/',pos+1)
            splitpath = original.path[slash+1:len(original.path)]
        splitpath = re.sub("\[[0-9]+\]", "", splitpath)
        s = splitpath.replace("_dash_",'-')
        return s
def writeSumtotal( modelpath,f):
    """Write a SUMTOTAL addmsg line for every input feeding a moose
    Function, targeting the Function's parent pool."""
    for func in wildcardFind(modelpath+'/##[ISA=Function]'):
        inputs = element(func.path+'/x[0]')
        msgs = ["addmsg /kinetics/" + trimPath(src) + " /kinetics/" +
                trimPath(element(func.parent)) + " SUMTOTAL n nInit\n"
                for src in inputs.neighbors["input"]]
        f.write("".join(msgs))
def storePlotMsgs( tgraphs,f):
    """Write the PLOT addmsg lines that connect each recorded pool to its
    xplot table. Tables outside a 'graphs'/'graph_0' path segment are
    remapped under /graphs/conc1."""
    s = ""
    if tgraphs:
        for graph in tgraphs:
            # Locate the 'graphs' (or legacy 'graph_0') segment of the path.
            slash = graph.path.find('graphs')
            if not slash > -1:
                slash = graph.path.find('graph_0')
            if slash > -1:
                conc = graph.path.find('conc')
                if conc > -1 :
                    tabPath = graph.path[slash:len(graph.path)]
                else:
                    tabPath = "/graphs/conc1" +graph.path[slash1:len(graph.path)]
                    slash1 = graph.path.find('/',slash)
                if len(element(graph).msgOut):
                    # The plotted pool is the far end of the table's first
                    # outgoing message.
                    poolPath = (element(graph).msgOut)[0].e2.path
                    poolEle = element(poolPath)
                    poolName = poolEle.name
                    bgPath = (poolEle.path+'/info')
                    bg = Annotator(bgPath).color
                    # NOTE(review): getColorCheck can return an int index;
                    # the concatenation below assumes a string -- confirm
                    # annotation colors here are stored as names.
                    bg = getColorCheck(bg,GENESIS_COLOR_SEQUENCE)
                    tabPath = re.sub("\[[0-9]+\]", "", tabPath)
                    s = s+"addmsg /kinetics/" + trimPath( poolEle ) + " " + tabPath + \
                            " PLOT Co *" + poolName + " *" + bg +"\n";
    f.write(s)
def writeplot( tgraphs,f ):
    """Write a kkit 'simundump xplot' entry for every moose table that has
    an outgoing record message. Tables outside a 'graphs'/'graph_0' path
    segment are remapped under /graphs/conc1."""
    if tgraphs:
        for graphs in tgraphs:
            # Locate the 'graphs' (or legacy 'graph_0') segment of the path.
            slash = graphs.path.find('graphs')
            if not slash > -1:
                slash = graphs.path.find('graph_0')
            if slash > -1:
                conc = graphs.path.find('conc')
                if conc > -1 :
                    tabPath = "/"+graphs.path[slash:len(graphs.path)]
                else:
                    slash1 = graphs.path.find('/',slash)
                    tabPath = "/graphs/conc1" +graphs.path[slash1:len(graphs.path)]
                if len(element(graphs).msgOut):
                    poolPath = (element(graphs).msgOut)[0].e2.path
                    poolEle = element(poolPath)
                    poolAnno = (poolEle.path+'/info')
                    # Plot line colour comes from the pool's textColor
                    # annotation.
                    fg = Annotator(poolAnno).textColor
                    fg = getColorCheck(fg,GENESIS_COLOR_SEQUENCE)
                    tabPath = re.sub("\[[0-9]+\]", "", tabPath)
                    f.write("simundump xplot " + tabPath + " 3 524288 \\\n" + "\"delete_plot.w <s> <d>; edit_plot.D <w>\" " + fg + " 0 0 1\n")
def writePool(modelpath,f,volIndex):
    """Write a kkit 'simundump kpool' entry for every pool under modelpath.

    Args:
        volIndex: dict mapping compartment volume -> '/geometry...' name
            (built by writeCompartment) so each pool references the right
            geometry entry.
    """
    for p in wildcardFind(modelpath+'/##[ISA=PoolBase]'):
        # Buffered pools get slave_enable = 4 unless a Function child
        # drives them (then kkit treats them as computed, flag 0).
        slave_enable = 0
        if (p.className == "BufPool" or p.className == "ZombieBufPool"):
            pool_children = p.children
            if pool_children== 0:
                slave_enable = 4
            else:
                for pchild in pool_children:
                    if not(pchild.className == "ZombieFunction") and not(pchild.className == "Function"):
                        slave_enable = 4
                    else:
                        slave_enable = 0
                        break
        # Map stored coordinates into the kkit canvas (0..multi).
        xp = cord[p]['x']
        yp = cord[p]['y']
        x = ((xp-xmin)/(xmax-xmin))*multi
        y = ((yp-ymin)/(ymax-ymin))*multi
        #y = ((ymax-yp)/(ymax-ymin))*multi
        pinfo = p.path+'/info'
        # NOTE(review): unlike writeReac()/writeEnz(), color/textcolor have
        # no defaults before this guard -- if the first pool lacks an info
        # element this raises NameError, and later pools silently reuse the
        # previous pool's colors. Confirm and consider adding defaults.
        if exists(pinfo):
            color = Annotator(pinfo).getField('color')
            color = getColorCheck(color,GENESIS_COLOR_SEQUENCE)
            textcolor = Annotator(pinfo).getField('textColor')
            textcolor = getColorCheck(textcolor,GENESIS_COLOR_SEQUENCE)
        geometryName = volIndex[p.volume]
        # Volume scaled for molecule-count <-> concentration conversion.
        volume = p.volume * NA * 1e-3
        f.write("simundump kpool /kinetics/" + trimPath(p) + " 0 " +
                str(p.diffConst) + " " +
                str(0) + " " +
                str(0) + " " +
                str(0) + " " +
                str(p.nInit) + " " +
                str(0) + " " + str(0) + " " +
                str(volume)+ " " +
                str(slave_enable) +
                " /kinetics"+ geometryName + " " +
                str(color) +" " + str(textcolor) + " " + str(int(x)) + " " + str(int(y)) + " "+ str(0)+"\n")
    # print " notes ",notes
    # return notes
def getColorCheck(color,GENESIS_COLOR_SEQUENCE):
    """Normalise a color specification to something Genesis can use.

    Accepts '#RRGGBB' hex strings, '(r, g, b)' literal strings, RGB tuples
    (mapped to the nearest index in GENESIS_COLOR_SEQUENCE), plain color
    name strings and ready-made integer indices (returned unchanged).

    Raises:
        Exception: if color is none of the above.
    """
    if isinstance(color, str):
        if color.startswith("#"):
            # '#RRGGBB' hex string -> (r, g, b) int triple.
            color = ( int(color[1:3], 16)
                , int(color[3:5], 16)
                , int(color[5:7], 16)
                )
            index = nearestColorIndex(color, GENESIS_COLOR_SEQUENCE)
            return index
        elif color.startswith("("):
            # "(r, g, b)" literal string stored in an annotation.
            # NOTE: eval on annotation text -- trusted model files assumed.
            color = eval(color)[0:3]
            index = nearestColorIndex(color, GENESIS_COLOR_SEQUENCE)
            return index
        else:
            # Assume it is already a Genesis color name.
            index = color
            return index
    elif isinstance(color, tuple):
        # BUG FIX: on Python 3 map() returns an iterator, which cannot be
        # sliced; the original `map(int, color)[0:3]` raised TypeError.
        color = list(map(int, color))[0:3]
        index = nearestColorIndex(color, GENESIS_COLOR_SEQUENCE)
        return index
    elif isinstance(color, int):
        index = color
        return index
    else:
        raise Exception("Invalid Color Value!")
def getxyCord(xcord,ycord,list1,sceneitems):
    """Append the x/y position of every element in list1 to xcord/ycord and
    record it in the global `cord` table.

    When sceneitems is None, positions come from each element's info
    Annotator; otherwise from the GUI scene items (y axis flipped).
    """
    for item in list1:
        # NOTE(review): `isinstance(a, Function) and isinstance(a,
        # ZombieFunction)` cannot both be true for one concrete type, so
        # this filter is effectively a no-op; the intent was probably `or`.
        # Confirm before changing -- flipping it would also skip the
        # Function elements that getCor() passes in.
        if not ( isinstance(item,Function) and isinstance(item,ZombieFunction) ):
            if sceneitems == None:
                objInfo = item.path+'/info'
                xpos = xyPosition(objInfo,'x')
                ypos = xyPosition(objInfo,'y')
            else:
                # GUI path: take positions from the Qt scene (y flipped).
                co = sceneitems[item]
                xpos = co.scenePos().x()
                ypos =-co.scenePos().y()
            cord[item] ={ 'x': xpos,'y':ypos}
            xcord.append(xpos)
            ycord.append(ypos)
def xyPosition(objInfo,xory):
    """Return the 'x' or 'y' field of an info element as a float, falling
    back to 0.0 when the stored value cannot be parsed."""
    try:
        return float(element(objInfo).getField(xory))
    except ValueError:
        return 0.0
def getCor(modelRoot,sceneitems):
    """Collect the layout coordinates of every pool, reaction, enzyme,
    function and stimulus table under modelRoot into the global `cord`
    table.

    Args:
        modelRoot: moose path to search under ('/' for the whole tree).
        sceneitems: optional {element: QGraphicsItem} map from the GUI;
            when None, positions are read from each element's info
            Annotator.

    Returns:
        (xmin, ymin, xmax, ymax, positionInfoExist): the bounding box of
        all collected coordinates; positionInfoExist is False when every
        stored coordinate is zero (no layout information found).
    """
    xmin = ymin = 0.0
    xmax = ymax = 1.0
    positionInfoExist = False
    # BUG FIX: the original used chained assignments (`xcord = ycord = []`
    # and `mollist = realist = ... = []`), which bind ALL the names to ONE
    # shared list object -- x and y coordinates (and the mol/cplx lists)
    # were silently merged, yielding a wrong bounding box. Give every name
    # its own list.
    xcord = []
    ycord = []
    mollist = []
    realist = []
    enzlist = []
    cplxlist = []
    tablist = []
    funclist = []
    meshEntryWildcard = '/##[ISA=ChemCompt]'
    if modelRoot != '/':
        meshEntryWildcard = modelRoot+meshEntryWildcard
    for meshEnt in wildcardFind(meshEntryWildcard):
        mol_cpl = wildcardFind(meshEnt.path+'/##[ISA=PoolBase]')
        realist = wildcardFind(meshEnt.path+'/##[ISA=ReacBase]')
        enzlist = wildcardFind(meshEnt.path+'/##[ISA=EnzBase]')
        funclist = wildcardFind(meshEnt.path+'/##[ISA=Function]')
        tablist = wildcardFind(meshEnt.path+'/##[ISA=StimulusTable]')
        if mol_cpl or funclist or enzlist or realist or tablist:
            for m in mol_cpl:
                # Enzyme-complex pools read their position from the parent
                # enzyme's info; plain pools from their own.
                if isinstance(element(m.parent),CplxEnzBase):
                    cplxlist.append(m)
                    objInfo = m.parent.path+'/info'
                elif isinstance(element(m),PoolBase):
                    mollist.append(m)
                    objInfo =m.path+'/info'
                if sceneitems is None:
                    xx = xyPosition(objInfo,'x')
                    yy = xyPosition(objInfo,'y')
                else:
                    # GUI path: positions from the Qt scene (y flipped).
                    c = sceneitems[m]
                    xx = c.scenePos().x()
                    yy =-c.scenePos().y()
                cord[m] ={ 'x': xx,'y':yy}
                xcord.append(xx)
                ycord.append(yy)
            getxyCord(xcord,ycord,realist,sceneitems)
            getxyCord(xcord,ycord,enzlist,sceneitems)
            getxyCord(xcord,ycord,funclist,sceneitems)
            getxyCord(xcord,ycord,tablist,sceneitems)
    # Guard against an empty model: min()/max() on an empty list raises.
    if xcord and ycord:
        xmin = min(xcord)
        xmax = max(xcord)
        ymin = min(ycord)
        ymax = max(ycord)
        positionInfoExist = not(len(np.nonzero(xcord)[0]) == 0 \
            and len(np.nonzero(ycord)[0]) == 0)
    return(xmin,ymin,xmax,ymax,positionInfoExist)
def writeCompartment(modelpath,compts,f):
    """Write kkit group and geometry entries for every compartment.

    Returns:
        volIndex: dict mapping compartment volume -> '/geometry[...]' name,
        used later by writePool to reference the right geometry entry.
    """
    index = 0
    volIndex = {}
    # Every compartment other than 'kinetics' becomes a kkit group.
    for compt in compts:
        if compt.name != "kinetics":
            # Random placement near the top-right corner of the canvas.
            xgrp = xmax -random.randrange(1,10)
            ygrp = ymin +random.randrange(1,10)
            x = ((xgrp-xmin)/(xmax-xmin))*multi
            #y = ((ymax-ygrp)/(ymax-ymin))*multi
            y = ((ygrp-ymin)/(ymax-ymin))*multi
            f.write("simundump group /kinetics/" + compt.name + " 0 " + "blue" + " " + "green" + " x 0 0 \"\" defaultfile \\\n" )
            f.write( " defaultfile.g 0 0 0 " + str(int(x)) + " " + str(int(y)) + " 0\n")
    i = 0
    l = len(compts)
    geometry = ""
    # Geometry entries are numbered in reverse declaration order; index 0
    # is written without a vector suffix.
    for compt in compts:
        size = compt.volume
        ndim = compt.numDimensions
        vecIndex = l-i-1
        #print vecIndex
        i = i+1
        xgeo = xmax -random.randrange(1,10)
        ygeo = ymin +random.randrange(1,10)
        x = ((xgeo-xmin)/(xmax-xmin))*multi
        #y = ((ymax-ygeo)/(ymax-ymin))*multi
        y = ((ygeo-ymin)/(ymax-ymin))*multi
        if vecIndex > 0:
            geometry = geometry+"simundump geometry /kinetics" + "/geometry[" + str(vecIndex) +"] 0 " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black "+ str(int(x)) + " " +str(int(y)) +" 0\n";
            volIndex[size] = "/geometry["+str(vecIndex)+"]"
        else:
            geometry = geometry+"simundump geometry /kinetics" + "/geometry 0 " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black " + str(int(x)) + " "+str(int(y))+ " 0\n";
            volIndex[size] = "/geometry"
    f.write(geometry)
    writeGroup(modelpath,f,xmax,ymax)
    return volIndex
def writeGroup(modelpath,f,xmax,ymax):
    """Write a kkit 'simundump group' entry for every Neutral container
    under modelpath, skipping GUI/bookkeeping containers."""
    # Containers generated by the GUI/bookkeeping that must not be dumped.
    ignore = ["graphs","moregraphs","geometry","groups","conc1","conc2","conc3","conc4","model","data","graph_0","graph_1","graph_2","graph_3","graph_4","graph_5"]
    for g in wildcardFind(modelpath+'/##[TYPE=Neutral]'):
        if g.name in ignore:
            continue
        if trimPath(g) == None:
            continue
        # Scatter the group icon near the top-right corner of the canvas.
        xgrp1 = xmax - random.randrange(1,10)
        ygrp1 = ymin + random.randrange(1,10)
        x = ((xgrp1-xmin)/(xmax-xmin))*multi
        y = ((ygrp1-ymin)/(ymax-ymin))*multi
        f.write("simundump group /kinetics/" + trimPath(g) + " 0 " + "blue" + " " + "green" + " x 0 0 \"\" defaultfile \\\n")
        f.write(" defaultfile.g 0 0 0 " + str(int(x)) + " " + str(int(y)) + " 0\n")
def writeHeader(f,maxVol):
    """Write the kkit file prologue: timing constants followed by the fixed
    'simobjdump' field declarations for each kkit object type.

    Args:
        maxVol: largest compartment volume, recorded as DEFAULT_VOL.
    """
    simdt = 0.001
    plotdt = 0.1
    rawtime = 100
    maxtime = 100
    defaultVol = maxVol
    f.write("//genesis\n"
            "// kkit Version 11 flat dumpfile\n\n"
            "// Saved on " + str(rawtime)+"\n"
            "include kkit {argv 1}\n"
            "FASTDT = " + str(simdt)+"\n"
            "SIMDT = " +str(simdt)+"\n"
            "CONTROLDT = " +str(plotdt)+"\n"
            "PLOTDT = " +str(plotdt)+"\n"
            "MAXTIME = " +str(maxtime)+"\n"
            "TRANSIENT_TIME = 2"+"\n"
            "VARIABLE_DT_FLAG = 0"+"\n"
            "DEFAULT_VOL = " +str(defaultVol)+"\n"
            "VERSION = 11.0 \n"
            "setfield /file/modpath value ~/scripts/modules\n"
            "kparms\n\n"
            )
    # Fixed simobjdump declarations: field order for every kkit object type
    # that may appear in the dump below.
    f.write( "//genesis\n"
            "initdump -version 3 -ignoreorphans 1\n"
            "simobjdump table input output alloced step_mode stepsize x y z\n"
            "simobjdump xtree path script namemode sizescale\n"
            "simobjdump xcoredraw xmin xmax ymin ymax\n"
            "simobjdump xtext editable\n"
            "simobjdump xgraph xmin xmax ymin ymax overlay\n"
            "simobjdump xplot pixflags script fg ysquish do_slope wy\n"
            "simobjdump group xtree_fg_req xtree_textfg_req plotfield expanded movealone \\\n"
            "  link savename file version md5sum mod_save_flag x y z\n"
            "simobjdump geometry size dim shape outside xtree_fg_req xtree_textfg_req x y z\n"
            "simobjdump kpool DiffConst CoInit Co n nInit mwt nMin vol slave_enable \\\n"
            "  geomname xtree_fg_req xtree_textfg_req x y z\n"
            "simobjdump kreac kf kb notes xtree_fg_req xtree_textfg_req x y z\n"
            "simobjdump kenz CoComplexInit CoComplex nComplexInit nComplex vol k1 k2 k3 \\\n"
            "  keepconc usecomplex notes xtree_fg_req xtree_textfg_req link x y z\n"
            "simobjdump stim level1 width1 delay1 level2 width2 delay2 baselevel trig_time \\\n"
            "  trig_mode notes xtree_fg_req xtree_textfg_req is_running x y z\n"
            "simobjdump xtab input output alloced step_mode stepsize notes editfunc \\\n"
            "  xtree_fg_req xtree_textfg_req baselevel last_x last_y is_running x y z\n"
            "simobjdump kchan perm gmax Vm is_active use_nernst notewriteReacs xtree_fg_req \\\n"
            "  xtree_textfg_req x y z\n"
            "simobjdump transport input output alloced step_mode stepsize dt delay clock \\\n"
            "  kf xtree_fg_req xtree_textfg_req x y z\n"
            "simobjdump proto x y z\n"
            )
def estimateDefaultVol(compts):
    """Return the largest compartment volume, or 0 when compts is empty."""
    volumes = [compt.volume for compt in compts]
    if volumes:
        return max(volumes)
    return 0
def writeGui( f ):
    """Write the fixed kkit GUI boilerplate: the four graph windows, the
    edit-tree widget and the notes widget."""
    f.write("simundump xgraph /graphs/conc1 0 0 99 0.001 0.999 0\n"
            "simundump xgraph /graphs/conc2 0 0 100 0 1 0\n"
            "simundump xgraph /moregraphs/conc3 0 0 100 0 1 0\n"
            "simundump xgraph /moregraphs/conc4 0 0 100 0 1 0\n"
            "simundump xcoredraw /edit/draw 0 -6 4 -2 6\n"
            "simundump xtree /edit/draw/tree 0 \\\n"
            "  /kinetics/#[],/kinetics/#[]/#[],/kinetics/#[]/#[]/#[][TYPE!=proto],/kinetics/#[]/#[]/#[][TYPE!=linkinfo]/##[] \"edit_elm.D <v>; drag_from_edit.w <d> <S> <x> <y> <z>\" auto 0.6\n"
            "simundump xtext /file/notes 0 1\n")
def writeNotes(modelpath,f):
    """Append a kkit 'call ... LOAD' line for every element that carries a
    non-empty notes annotation."""
    notes = ""
    # NOTE(review): only the first wildcard term is prefixed with modelpath;
    # the remaining terms search from the root ('/##[...]'). Confirm whether
    # that is intended or the prefix was meant to apply to every term.
    items = wildcardFind(modelpath+"/##[ISA=ChemCompt],/##[ISA=ReacBase],/##[ISA=PoolBase],/##[ISA=EnzBase],/##[ISA=Function],/##[ISA=StimulusTable]")
    for item in items:
        info = item.path+'/info'
        notes = Annotator(info).getField('notes')
        if (notes):
            f.write("call /kinetics/"+ trimPath(item)+"/notes LOAD \ \n\""+Annotator(info).getField('notes')+"\"\n")
def writeFooter1(f):
    """Write the kkit 'enddump' terminator closing the simundump section."""
    footer = "\nenddump\n // End of dump\n"
    f.write(footer)
def writeFooter2(f):
    """Write the final 'complete_loading' directive expected by Genesis."""
    footer = "complete_loading\n"
    f.write(footer)
if __name__ == "__main__":
    import sys

    # Usage: python _main.py <modelfile>  -- loads the model into moose and
    # re-saves it under the same base name with a .g extension.
    filename = sys.argv[1]
    modelpath = filename[0:filename.find('.')]
    loadModel(filename,'/'+modelpath,"gsl")
    # BUG FIX: the original read `output = filename.g`, which raises
    # AttributeError (str has no attribute 'g'); the intent is the .g file
    # name that write() derives from its filename argument.
    output = modelpath + '.g'
    written = write('/'+modelpath,output)
    if written:
        print(" file written to ",output)
    else:
        # BUG FIX: the failure message said "could be written".
        print(" could not be written to kkit format")
|
rahulgayatri23/moose-core
|
python/moose/genesis/_main.py
|
Python
|
gpl-3.0
| 31,468
|
[
"MOOSE"
] |
026b69aef0c3a27588708fd149baa362a023b8c8b41f508c12d4d5359609e70c
|
import sys
import math
sys.path.append('../../../vmdgadgets')
import vmdutil
from vmdutil import vmddef
def rotate_root(frame_no, interval, dir=True):
    """Return four center-bone keyframes performing one full revolution,
    one quarter turn (90 degrees) every `interval` frames.

    Args:
        frame_no: frame number of the first keyframe.
        interval: frames between successive quarter turns.
        dir: rotation direction; True rotates one way, False the other.
    """
    base = vmddef.BONE_SAMPLE._replace(name='センター'.encode('shift-jis'))
    sign = 1 if dir else -1
    frames = []
    for step in range(4):
        yaw = step * math.pi / 2
        quat = vmdutil.euler_to_quaternion((0, -yaw * sign, 0))
        frames.append(base._replace(frame=frame_no + step * interval,
                                    rotation=quat))
    return frames
if __name__ == '__main__':
    # Build a motion that slowly spins the model's center bone and save it
    # as stage.vmd. The commented-out interval/frames values are alternative
    # tempos and motion lengths kept for reference.
    # left
    #interval =113
    #interval =120
    #interval =133
    interval = 161
    #interval =280
    # right
    #interval = 123
    #interval = 147
    #interval = 181
    #frames = 4432
    #frames = 4508
    frames = 3864
    # Number of full revolutions needed to cover `frames` (4 keyframes per
    # revolution), rounded up.
    rounds = frames // (interval*4) + 1
    bone_frames = []
    for i in range(rounds):
        frame = i * interval * 4
        c = rotate_root(frame, interval, False)
        for f in c:
            # Keep only keyframes that fall inside the motion length.
            if f.frame <= frames:
                bone_frames.append(f)
            else:
                break
    vmdout = vmdutil.Vmdio()
    vmdout.header = vmdout.header._replace(
        model_name='circle_sample'.encode(vmddef.ENCODING))
    vmdout.set_frames('bones', bone_frames)
    vmdout.store('stage.vmd')
|
Hashi4/vmdgadgets
|
sample/lookat/sm31942771/round_floor.py
|
Python
|
apache-2.0
| 1,360
|
[
"VMD"
] |
5eb607925a449ef6aac775e7e89c5b10ef976e323ee68333412049c55b4e1e7e
|
#!/usr/bin/env python
# qm.py -- A Quine McCluskey Python implementation
#
# Copyright (c) 2006-2013 Thomas Pircher <tehpeh@gmx.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""An implementation of the Quine McCluskey algorithm.
This implementation of the Quine McCluskey algorithm has no inherent limits
(other than the calculation time) on the size of the inputs.
Also, in the limited tests of the author of this module, this implementation is
considerably faster than other public Python implementations for non-trivial
inputs.
Another unique feature of this implementation is the possibility to use the XOR
and XNOR operators, in addition to the normal AND operator, to minimise the
terms. This slows down the algorithm, but in some cases it can be a big win in
terms of complexity of the output.
"""
from __future__ import print_function
import math
from shatter.util.ordered_set import OrderedSet
from shatter.util.inverse_tree_set import InverseTreeSet
class QuineMcCluskey:
"""The Quine McCluskey class.
The QuineMcCluskey class minimises boolean functions using the Quine
McCluskey algorithm.
If the class was instantiated with the use_xor set to True, then the
resulting boolean function may contain XOR and XNOR operators.
"""
__version__ = "0.2"
def __init__(self, use_xor=False):
"""The class constructor.
Kwargs:
use_xor (bool): if True, try to use XOR and XNOR operations to give
a more compact return.
"""
self.use_xor = use_xor # Whether or not to use XOR and XNOR operations.
self.n_bits = 0 # number of bits (i.e. self.n_bits == len(ones[i]) for every i).
def __num2str(self, i):
"""
Convert an integer to its bit-representation in a string.
Args:
i (int): the number to convert.
Returns:
The binary string representation of the parameter i.
"""
x = ['1' if i & (1 << k) else '0' for k in range(self.n_bits - 1, -1, -1)]
return "".join(x)
def simplify(self, ones, dc=[]):
    """Simplify a list of terms.

    Args:
        ones (list of int): minterm numbers for which the output
            function is '1', e.g. [1, 2, 6, 8, 15].

    Kwargs:
        dc (list of int): minterm numbers whose output value we do not
            care about.

    Returns:
        See simplify_los(); None when no terms are supplied.

    Example:
        ones = [2, 6, 10, 14], dc = []  ->  ['--10']
        meaning x = b1 & ~b0 (bit1 AND NOT bit0).

    Example:
        ones = [1, 2, 5, 6, 9, 10, 13, 14], dc = []  ->  ['--^^']
        meaning x = b1 ^ b0 (bit1 XOR bit0).
    """
    terms = ones + dc
    if not terms:
        return None

    # Number of bits needed to represent the largest term; used by
    # __num2str() when encoding the terms below.
    self.n_bits = int(math.ceil(math.log(max(terms) + 1, 2)))

    # Encode the ones and dontcares as fixed-width binary strings.
    ones = set(self.__num2str(i) for i in ones)
    dc = set(self.__num2str(i) for i in dc)

    # BUG FIX: simplify_los() takes a single set of terms (the dontcares
    # are folded in); the previous code passed `ones` and `dc` as two
    # separate arguments, which raised a TypeError.
    return self.simplify_los(ones | dc)
def simplify_los(self, terms):
    """The simplification algorithm for a list of string-encoded inputs.

    Args:
        terms (set of str): strings that describe when the output
            function is '1', e.g. ['0001', '0010', '0110', '1000',
            '1111'].  Don't-care combinations are passed in the same
            set, mixed with the ones (there is no separate `dc`
            argument).

    Returns:
        A set of strings which represent the reduced minterms.  The
        length of the strings equals the number of bits in the input;
        character 0 stands for the most significant bit, character
        n - 1 (n = number of bits) for the least significant bit.

        The following characters are allowed in the return string:
            '-' don't care: this bit can be either zero or one.
            '1' the bit must be one.
            '0' the bit must be zero.
            '^' all bits with the caret are XOR-ed together.
            '~' all bits with the tilde are XNOR-ed together.

        Returns None for empty input or inconsistent term lengths.

    Example:
        ones = ['0010', '0110', '1010', '1110']  ->  ['--10']
        In other words, x = b1 & ~b0 (bit1 AND NOT bit0).

    Example:
        ones = ['0001', '0010', '0101', '0110',
                '1001', '1010', '1101', '1110']  ->  ['--^^']
        In other words, x = b1 ^ b0 (bit1 XOR bit0).
    """
    # Profiling counters for the three kinds of term comparisons.
    self.profile_cmp = 0 # number of comparisons (for profiling)
    self.profile_xor = 0 # number of comparisons (for profiling)
    self.profile_xnor = 0 # number of comparisons (for profiling)

    if len(terms) == 0:
        return None

    # Calculate the number of bits to use; all terms must share it.
    self.n_bits = max(len(i) for i in terms)
    if self.n_bits != min(len(i) for i in terms):
        return None

    # First step of Quine-McCluskey method.
    prime_implicants = self.__get_prime_implicants(terms)
    # Remove essential terms.
    essential_implicants = self.__get_essential_implicants(prime_implicants)
    # Insert here the Quine McCluskey step 2: prime implicant chart.
    # Insert here Petrick's Method.
    return essential_implicants
def __reduce_simple_xor_terms(self, t1, t2):
"""Try to reduce two terms t1 and t2, by combining them as XOR terms.
Args:
t1 (str): a term.
t2 (str): a term.
Returns:
The reduced term or None if the terms cannot be reduced.
"""
difft10 = 0
difft20 = 0
ret = []
for (t1c, t2c) in zip(t1, t2):
if t1c == '^' or t2c == '^' or t1c == '~' or t2c == '~':
return None
elif t1c != t2c:
ret.append('^')
if t2c == '0':
difft10 += 1
else:
difft20 += 1
else:
ret.append(t1c)
if difft10 == 1 and difft20 == 1:
return "".join(ret)
return None
def __reduce_simple_xnor_terms(self, t1, t2):
"""Try to reduce two terms t1 and t2, by combining them as XNOR terms.
Args:
t1 (str): a term.
t2 (str): a term.
Returns:
The reduced term or None if the terms cannot be reduced.
"""
difft10 = 0
difft20 = 0
ret = []
for (t1c, t2c) in zip(t1, t2):
if t1c == '^' or t2c == '^' or t1c == '~' or t2c == '~':
return None
elif t1c != t2c:
ret.append('~')
if t1c == '0':
difft10 += 1
else:
difft20 += 1
else:
ret.append(t1c)
if (difft10 == 2 and difft20 == 0) or (difft10 == 0 and difft20 == 2):
return "".join(ret)
return None
def __get_prime_implicants(self, terms):
    """Simplify the set 'terms' (Quine McCluskey step 1).

    Args:
        terms (set of str): set of strings representing the minterms of
            ones and dontcares.

    Returns:
        A set of prime implicants: the minterms that cannot be reduced
        further by step 1 of the Quine McCluskey method.  Generates all
        prime implicants, whether they are redundant or not.
    """
    # Sort and remove duplicates.
    n_groups = self.n_bits + 1
    # Terms that could not be combined in some pass; candidates for the
    # final prime-implicant set.
    marked = OrderedSet()

    # Group terms into the list groups.
    # groups is a list of length n_groups.
    # Each element of groups is a set of terms with the same number
    # of ones. In other words, each term contained in the set
    # groups[i] contains exactly i ones.
    groups = [OrderedSet() for i in range(n_groups)]
    for t in terms:
        n_bits = t.count('1')
        groups[n_bits].add(t)

    if self.use_xor:
        # Add 'simple' XOR and XNOR terms to the set of terms.
        # Simple means the terms can be obtained by combining just two
        # bits.  XOR pairs live in the same group (same number of
        # ones); XNOR pairs differ by exactly two ones, hence gi + 2.
        for gi, group in enumerate(groups):
            for t1 in group:
                for t2 in group:
                    t12 = self.__reduce_simple_xor_terms(t1, t2)
                    if t12 != None:
                        terms.add(t12)
                if gi < n_groups - 2:
                    for t2 in groups[gi + 2]:
                        t12 = self.__reduce_simple_xnor_terms(t1, t2)
                        if t12 != None:
                            terms.add(t12)

    # Repeatedly combine terms until a pass combines nothing.
    done = False
    while not done:
        # Group terms into groups.
        # groups is a dict keyed by (number of ones, number of '^',
        # number of '~'); only terms with identical counts can combine.
        groups = dict()
        for t in terms:
            n_ones = t.count('1')
            n_xor = t.count('^')
            n_xnor = t.count('~')
            # The algorithm can not cope with mixed XORs and XNORs in
            # one expression.
            assert n_xor == 0 or n_xnor == 0
            key = (n_ones, n_xor, n_xnor)
            if key not in groups:
                groups[key] = OrderedSet()
            groups[key].add(t)

        terms = OrderedSet()    # The set of new created terms
        used = OrderedSet()     # The set of used terms

        # Find prime implicants
        for key in groups:
            key_next = (key[0]+1, key[1], key[2])
            if key_next in groups:
                group_next = groups[key_next]
                for t1 in groups[key]:
                    # Optimisation:
                    # The Quine-McCluskey algorithm compares t1 with
                    # each element of the next group. (Normal approach)
                    # But in reality it is faster to construct all
                    # possible permutations of t1 by adding a '1' in
                    # opportune positions and check if this new term is
                    # contained in the set groups[key_next].
                    for i, c1 in enumerate(t1):
                        if c1 == '0':
                            self.profile_cmp += 1
                            t2 = t1[:i] + '1' + t1[i+1:]
                            if t2 in group_next:
                                t12 = t1[:i] + '-' + t1[i+1:]
                                used.add(t1)
                                used.add(t2)
                                terms.add(t12)

        # Find XOR combinations: a '^' term combines with its XNOR
        # complement one group up (note the swapped key[1]/key[2]).
        for key in [k for k in groups if k[1] > 0]:
            key_complement = (key[0] + 1, key[2], key[1])
            if key_complement in groups:
                for t1 in groups[key]:
                    t1_complement = t1.replace('^', '~')
                    for i, c1 in enumerate(t1):
                        if c1 == '0':
                            self.profile_xor += 1
                            t2 = t1_complement[:i] + '1' + t1_complement[i+1:]
                            if t2 in groups[key_complement]:
                                t12 = t1[:i] + '^' + t1[i+1:]
                                used.add(t1)
                                terms.add(t12)

        # Find XNOR combinations (mirror of the XOR case above).
        for key in [k for k in groups if k[2] > 0]:
            key_complement = (key[0] + 1, key[2], key[1])
            if key_complement in groups:
                for t1 in groups[key]:
                    t1_complement = t1.replace('~', '^')
                    for i, c1 in enumerate(t1):
                        if c1 == '0':
                            self.profile_xnor += 1
                            t2 = t1_complement[:i] + '1' + t1_complement[i+1:]
                            if t2 in groups[key_complement]:
                                t12 = t1[:i] + '~' + t1[i+1:]
                                used.add(t1)
                                terms.add(t12)

        # Add the unused terms to the list of marked terms
        for g in list(groups.values()):
            marked |= g - used

        if len(used) == 0:
            done = True

    # Prepare the list of prime implicants
    pi = marked
    for g in list(groups.values()):
        pi |= g
    return pi
def __get_essential_implicants(self, terms):
    """Reduce the set of prime implicants to the essential ones.

    Args:
        terms (set of str): the prime implicants produced by
            __get_prime_implicants().

    Returns:
        An InverseTreeSet of implicants.  Terms whose covered minterms
        are already fully covered by previously selected (higher-rank)
        terms are omitted.
    """
    # Create all permutations for each term in terms, i.e. the concrete
    # '0'/'1' minterms each implicant covers.
    perms = {}
    for t in terms:
        perms[t] = set(p for p in self.permutations(t))

    # Now group the remaining terms and see if any term can be covered
    # by a combination of terms.
    ei_range = OrderedSet()     # union of minterms covered so far
    ei = InverseTreeSet([])     # the selected essential implicants
    groups = dict()             # terms bucketed by rank
    for t in terms:
        n = self.__get_term_rank(t, len(perms[t]))
        if n not in groups:
            groups[n] = OrderedSet()
        groups[n].add(t)
    # Greedy selection: highest-rank terms first; keep a term only if
    # it covers at least one minterm not covered yet.
    for t in sorted(list(groups.keys()), reverse=True):
        for g in groups[t]:
            if not perms[g] <= ei_range:
                ei.add(g)
                ei_range |= perms[g]
    return ei
def __get_term_rank(self, term, term_range):
"""Calculate the "rank" of a term.
Args:
term (str): one single term in string format.
term_range (int): the rank of the class of term.
Returns:
The "rank" of the term.
The rank of a term is a positive number or zero. If a term has all
bits fixed '0's then its "rank" is 0. The more 'dontcares' and xor or
xnor it contains, the higher its rank.
A dontcare weights more than a xor, a xor weights more than a xnor, a
xnor weights more than 1 and a 1 weights more than a 0.
This means, the higher rank of a term, the more desireable it is to
include this term in the final result.
"""
n = 0
for t in term:
if t == "-":
n += 8
elif t == "^":
n += 4
elif t == "~":
n += 2
elif t == "1":
n += 1
return 4*term_range + n
def permutations(self, value=''):
    """Iterator to generate all possible values out of a string.

    Args:
        value (str): A string containing any of the characters below.

    Returns:
        The output strings contain only '0' and '1'.

    Example:
        from qm import QuineMcCluskey
        qm = QuineMcCluskey()
        for i in qm.permutations('1--^^'):
            print(i)

    The operation performed by this generator function can be seen as
    the inverse of binary minimisation methods such as Karnaugh maps,
    Quine McCluskey or Espresso. It takes as input a minterm and
    generates all possible maxterms from it. Inputs and outputs are
    strings.

    Possible input characters:
        '0': the bit at this position will always be zero.
        '1': the bit at this position will always be one.
        '-': don't care: this bit can be zero or one.
        '^': all bits with the caret are XOR-ed together.
        '~': all bits with the tilde are XNOR-ed together.

    Algorithm description:
        This lovely piece of spaghetti code generates all possible
        permutations of a given string describing logic operations.
        This could be achieved by recursively running through all
        possibilities, but a more linear approach has been preferred.
        The basic idea of this algorithm is to consider all bit
        positions from 0 upwards (direction = +1) until the last bit
        position. When the last bit position has been reached, then the
        generated string is yielded. At this point the algorithm works
        its way backward (direction = -1) until it finds an operator
        like '-', '^' or '~'. The bit at this position is then flipped
        (generally from '0' to '1') and the direction flag again
        inverted. This way the bit position pointer (i) runs forth and
        back several times until all possible permutations have been
        generated.
        When the position pointer reaches position -1, all possible
        combinations have been visited.
    """
    n_bits = len(value)
    # '^' and '~' positions share one parity counter: n_xor is the
    # total number of XOR/XNOR positions in the input.
    n_xor = value.count('^') + value.count('~')
    xor_value = 0       # running parity of the XOR/XNOR bits set so far
    seen_xors = 0       # how many XOR/XNOR positions visited going forward
    res = ['0' for i in range(n_bits)]
    i = 0
    direction = +1
    while i >= 0:
        # binary constant
        if value[i] == '0' or value[i] == '1':
            res[i] = value[i]
        # dontcare operator
        elif value[i] == '-':
            if direction == +1:
                res[i] = '0'
            elif res[i] == '0':
                res[i] = '1'
                direction = +1
        # XOR operator
        elif value[i] == '^':
            seen_xors = seen_xors + direction
            if direction == +1:
                # At the last XOR/XNOR position the bit is forced so the
                # overall parity comes out even.
                if seen_xors == n_xor and xor_value == 0:
                    res[i] = '1'
                else:
                    res[i] = '0'
            else:
                if res[i] == '0' and seen_xors < n_xor - 1:
                    res[i] = '1'
                    direction = +1
                    seen_xors = seen_xors + 1
            if res[i] == '1':
                xor_value = xor_value ^ 1
        # XNOR operator
        elif value[i] == '~':
            seen_xors = seen_xors + direction
            if direction == +1:
                # Mirror of the XOR case: force odd overall parity.
                if seen_xors == n_xor and xor_value == 1:
                    res[i] = '1'
                else:
                    res[i] = '0'
            else:
                if res[i] == '0' and seen_xors < n_xor - 1:
                    res[i] = '1'
                    direction = +1
                    seen_xors = seen_xors + 1
            if res[i] == '1':
                xor_value = xor_value ^ 1
        # unknown input
        else:
            res[i] = '#'

        i = i + direction
        if i == n_bits:
            direction = -1
            i = n_bits - 1
            yield "".join(res)
|
jisazaTappsi/shatter
|
shatter/qm.py
|
Python
|
mit
| 20,868
|
[
"ESPResSo"
] |
bf06f54abe32cb467e1c0b855a8027b34ee732d7693de1728309958c56199af7
|
from setuptools import setup, find_packages
import sys, os

# Location of this setup.py, used to locate the doc files beside it.
here = os.path.abspath(os.path.dirname(__file__))

# BUG FIX: the original used open(...).read() and never closed the file
# handles; context managers close them deterministically.
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'NEWS.rst')) as news_file:
    NEWS = news_file.read()

version = '0.2.1.2'

install_requires = [
    'PyYAML', 'Mako', 'rpy2',
    # List your project dependencies here.
    # For more details, see:
    # http://packages.python.org/distribute/setuptools.html#declaring-dependencies
]

setup(name='blacktie',
      version=version,
      description="A python wrapper for analysis of RNA-seq data with the popular tophat/cufflinks pipeline.",
      long_description=README + '\n\n' + NEWS,
      classifiers=[
          # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
      ],
      keywords='scientific computing RNA-seq tophat cufflinks bowtie CummeRbund',
      author='Augustine Dunn',
      author_email='wadunn83@gmail.com',
      url='https://github.com/xguse/',
      license='GPL 3',
      packages=find_packages('src'),
      package_dir={'': 'src'},
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      entry_points={
          'console_scripts':
              ['blacktie=blacktie:main',
               'blacktie-encode=blacktie.scripts.encode_mail_li_file:main',
               'blacktie-cummerbund=blacktie.scripts.cummerbund:main']
      }
      )
|
xguse/blacktie
|
setup.py
|
Python
|
gpl-3.0
| 1,341
|
[
"Bowtie"
] |
eae437fda1b727ff6e753a3edf0058149c07381c0d769eca13eba56bd6bce1eb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# RecuperaPeekFrame.py
#
# Copyright 2014
# Monica Otero <monicaot2001@gmail.com>
# Carlos "casep" Sepulveda <casep@fedoraproject.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Process the result of the Gaussian fit and generate the frame where the
# peak ("peek" in the original filenames) is found.
# NOTE(review): this script uses Python 2 print statements and therefore
# requires a Python 2 interpreter.
import sys # system lib
import os # operative system lib
import matplotlib.pyplot as plt
import argparse #argument parsing
import scipy.io # input output lib (for save matlab matrix)
import numpy
import scipy.ndimage
from pylab import plot,show

# Command-line interface: both folders are mandatory.
parser = argparse.ArgumentParser(prog='RecuperaPeekFrame.py',
    description='Procesa resultado de Ajuste Gaussiano y genera frame dde se encuentra peek',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
    help='Source folder',
    type=str, required=True)
parser.add_argument('--outputFolder',
    help='Output folder',
    type=str, required=True)
args = parser.parse_args()

# Source folder of the file resulting from the gaussian fit (resultado.txt)
sourceFolder = args.sourceFolder
# Check for trailing / on the folder
if sourceFolder[-1] != '/':
    sourceFolder+='/'
if not os.path.exists(sourceFolder):
    print ''
    print 'Source folder does not exists ' + sourceFolder
    sys.exit()

# Output folder for the resulting csv; created on demand.
outputFolder = args.outputFolder
# Check for trailing / on the folder
if outputFolder[-1] != '/':
    outputFolder+='/'
if not os.path.exists(outputFolder):
    try:
        os.makedirs(outputFolder)
    except:
        # NOTE(review): bare except hides the real failure reason
        # (permissions, race, ...); consider catching OSError.
        print ''
        print 'Unable to create folder ' + outputFolder
        sys.exit()
def loadResultTxt(sourceFolder, unitFile):
    """Read <sourceFolder><unitFile>/resultado.txt and return its first value.

    Args:
        sourceFolder: base folder, expected to end with '/'.
        unitFile: name of the unit sub-folder containing resultado.txt.

    Returns:
        The first entry of the fit-result file.
    """
    result_path = sourceFolder + unitFile + '/resultado.txt'
    fit_values = numpy.loadtxt(result_path)
    return fit_values[0]
def main():
    """Write frame.csv summarising the fit peak frame of every unit.

    Walks every sub-directory of the module-level ``sourceFolder``,
    reads its resultado.txt via loadResultTxt(), and appends one row
    per unit to <outputFolder>/frame.csv.

    Returns:
        0 on completion.
    """
    # BUG FIX: the original did `file.close` (missing parentheses), so
    # the handle was never actually closed; the context manager closes
    # it reliably.  The local is also renamed so it no longer shadows
    # the `file` builtin.
    with open(outputFolder + 'frame.csv', "w") as out_csv:
        header = "Unidad"+'\t"'+"PeekFrame"+'\n'
        out_csv.write(header)
        for unitFile in os.listdir(sourceFolder):
            if os.path.isdir(sourceFolder + unitFile):
                fitResult = loadResultTxt(sourceFolder, unitFile)
                salidaValor = '"'+unitFile.rsplit('_', 1)[0]+'"\t"' \
                    + str(fitResult) + '\n'
                out_csv.write(salidaValor)
    return 0
# Script entry point.
if __name__ == '__main__':
    main()
|
creyesp/RF_Estimation
|
STA/helpers/recuperaPeekFrame/RecuperaPeekFrame.py
|
Python
|
gpl-2.0
| 2,900
|
[
"Gaussian"
] |
3d83cdd40ad98df5ab8f4299b441fd66c449a9b08968e7e91730ad9b7ea93248
|
# test_nsdf.py ---
# Changed from nsdf.py from
# https://github.com/BhallaLab/moose-examples/snippets/nsdf.py
from __future__ import print_function
import numpy as np
from datetime import datetime
import getpass
import os
# Use in-repo moose to install.
import moose
print('using moose from: %s' % moose.__file__)
global nsdf
def setup_model():
    """Setup a dummy model with a PulseGen and a SpikeGen. The SpikeGen
    detects the leading edges of the pulses created by the PulseGen
    and sends out the event times. We record the PulseGen outputValue
    as Uniform data and leading edge time as Event data in the NSDF
    file.

    Side effects: creates moose elements under /model, runs the
    simulation, writes 'nsdf_demo.h5' and 'nsdf.txt' in the current
    directory, and stores the writer in the module-level ``nsdf``.
    """
    global nsdf
    simtime = 100.0     # total simulated time (moose time units)
    dt = 1e-3           # tick step used for all 32 clocks below
    model = moose.Neutral('/model')
    # Square-pulse source: 1.0-high pulses, 10 delay, 20 width.
    pulse = moose.PulseGen('/model/pulse')
    pulse.level[0] = 1.0
    pulse.delay[0] = 10
    pulse.width[0] = 20
    # SpikeGen fires on threshold crossings of the pulse rising edge.
    t_lead = moose.SpikeGen('/model/t_lead')
    t_lead.threshold = 0.5
    moose.connect(pulse, 'output', t_lead,'Vm');
    nsdf = moose.NSDFWriter('/model/writer')
    nsdf.filename = 'nsdf_demo.h5'
    nsdf.mode = 2 #overwrite existing file
    nsdf.flushLimit = 100
    # Record the pulse output as Uniform data.
    moose.connect(nsdf, 'requestOut', pulse, 'getOutputValue')
    print('event input', nsdf.eventInput, nsdf.eventInput.num)
    print(nsdf)

    # Record the leading-edge times as Event data.
    nsdf.eventInput.num = 1
    ei = nsdf.eventInput[0]
    print(ei.path)
    moose.connect(t_lead, 'spikeOut', nsdf.eventInput[0], 'input')

    # Reference table written to nsdf.txt for the sanity check below.
    tab = moose.Table('spiketab')
    tab.threshold = t_lead.threshold
    clock = moose.element('/clock')
    for ii in range(32):
        moose.setClock(ii, dt)
    moose.connect(pulse, 'output', tab, 'spike')
    print(datetime.now().isoformat())
    moose.reinit()
    moose.start(simtime)
    print(datetime.now().isoformat())
    np.savetxt('nsdf.txt', tab.vector)

    ###################################
    # Set the environment attributes
    ###################################
    nsdf.stringAttr['title'] = 'NSDF writing demo for moose'
    nsdf.stringAttr['description'] = '''An example of writing data to NSDF file from MOOSE simulation. In
this simulation we generate square pules from a PulseGen object and
use a SpikeGen to detect the threshold crossing events of rising
edges. We store the pulsegen output as Uniform data and the threshold
crossing times as Event data. '''
    nsdf.stringAttr['creator'] = getpass.getuser()
    nsdf.stringVecAttr['software'] = ['python2.7', 'moose3' ]
    nsdf.stringVecAttr['method'] = ['']
    nsdf.stringAttr['rights'] = ''
    nsdf.stringAttr['license'] = 'CC-BY-NC'

    # Specify units. MOOSE is unit agnostic, so we explicitly set the
    # unit attibutes on individual datasets
    nsdf.stringAttr['/data/uniform/PulseGen/outputValue/tunit'] = 's'
    nsdf.stringAttr['/data/uniform/PulseGen/outputValue/unit'] = 'A'
    eventDataPath = '/data/event/SpikeGen/spikeOut/{}_{}_{}/unit'.format(t_lead.vec.value,
                                                                        t_lead.getDataIndex(),
                                                                        t_lead.fieldIndex)
    nsdf.stringAttr[eventDataPath] = 's'
# Script entry point: run the demo model, then sanity-check its outputs.
if __name__ == '__main__':
    setup_model()
    # Very basic tests
    nsdfFile = 'nsdf.txt'
    # BUG FIX: this first check verifies the NSDF/HDF5 output file, so
    # the error message must name nsdf.filename; the original reported
    # the unrelated text table file (nsdfFile) instead.
    if not os.path.exists(nsdf.filename):
        raise Exception("Test failed. No file : %s" % nsdf.filename)
    if not os.path.exists(nsdfFile):
        raise Exception("Test failed. No file : %s" % nsdfFile)
    data = np.loadtxt(nsdfFile)
    assert len(data) == 60001, "Expected 60001 entries"
|
subhacom/moose-core
|
tests/python/test_nsdf.py
|
Python
|
gpl-3.0
| 3,504
|
[
"MOOSE"
] |
aa548e0ef4523e589cb8f3f1971d9a81bcfd80dd79ee1fdab28d6cea29f8e882
|
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
# http://gitorious.org/pyosc/, this file updated 2012-02-01
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
# Package version triple; the third element is cut out of the SVN
# revision keyword string.  NOTE: 'global' at module scope is a no-op,
# kept here as in the original source.
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])

# Python types accepted as OSC float32 / int32 arguments; extended with
# numpy scalar types below when numpy is importable.
# NOTE(review): Python 2 only -- types.FloatType / types.IntType were
# removed in Python 3.
global FloatTypes
FloatTypes = [types.FloatType]

global IntTypes
IntTypes = [types.IntType]

# OSC timetags are expressed in NTP time, whose epoch is 1 Jan 1900.
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm

# Fractional NTP units per second (2**32).
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds

##
# numpy/scipy support:
##
try:
    from numpy import typeDict
    # Register numpy float/int scalar types so they are treated as
    # plain 32-bit OSC floats / ints when appended to a message.
    for ftype in ['float32', 'float64', 'float128']:
        try:
            FloatTypes.append(typeDict[ftype])
        except KeyError:
            pass
    for itype in ['int8', 'int16', 'int32', 'int64']:
        try:
            IntTypes.append(typeDict[itype])
            IntTypes.append(typeDict['u' + itype])
        except KeyError:
            pass
    # thanks for those...
    del typeDict, ftype, itype
except ImportError:
    pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print msg
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
    """Create a new OSCMessage.

    The OSC-address can be given via 'address'; any further positional
    arguments are appended to the message as data.
    """
    self.clear(address)
    if args:
        self.append(*args)
def setAddress(self, address):
    """Set or change the OSC-address.

    Args:
        address (str): the new OSC address pattern for this message.
    """
    self.address = address
def clear(self, address=""):
    """Clear (or set a new) OSC-address and clear any arguments
    appended so far.

    Kwargs:
        address (str): the new OSC-address (empty by default).
    """
    self.address = address
    self.clearData()
def clearData(self):
    """Clear any arguments appended so far.
    """
    # ',' is the mandatory leading character of an OSC typetag string.
    self.typetags = ","
    # Raw pre-encoded argument payload (Python 2 byte string).
    self.message = ""
def append(self, argument, typehint=None):
    """Appends data to the message, updating the typetags based on
    the argument's type. If the argument is a blob (counted
    string) pass in 'b' as typehint.
    'argument' may also be a list or tuple, in which case its elements
    will get appended one-by-one, all using the provided typehint.

    Args:
        argument: value (or iterable of values) to append.
        typehint (str): optional explicit typetag; 'b' for blob,
            't' for timetag, otherwise inferred by OSCArgument().

    Raises:
        TypeError: when an OSCMessage is appended (only OSCBundle
            accepts nested messages).
    """
    # NOTE(review): Python 2 only -- types.DictType does not exist on
    # Python 3.  A dict is flattened to its (key, value) pairs.
    if type(argument) == types.DictType:
        argument = argument.items()
    elif isinstance(argument, OSCMessage):
        raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")

    # Iterables are appended element by element, sharing the typehint.
    if hasattr(argument, '__iter__'):
        for arg in argument:
            self.append(arg, typehint)
        return

    if typehint == 'b':
        binary = OSCBlob(argument)
        tag = 'b'
    elif typehint == 't':
        binary = OSCTimeTag(argument)
        tag = 't'
    else:
        # OSCArgument infers the typetag from the value's Python type.
        tag, binary = OSCArgument(argument, typehint)

    self.typetags += tag
    self.message += binary
def getBinary(self):
    """Return the wire-format binary representation of this message:
    padded address string, padded typetag string, then the payload.
    """
    return OSCString(self.address) + OSCString(self.typetags) + self.message
def __repr__(self):
    """Returns a string containing the decoded message.
    """
    # Round-trip through the wire format via decodeOSC().
    return str(decodeOSC(self.getBinary()))
def __str__(self):
    """Returns the Message's address and contents as a string.
    """
    return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
    """Inverse of __eq__."""
    return not self.__eq__(other)
def __add__(self, values):
    """Return a copy of this message with the contents of 'values'
    appended (see extend()).
    """
    result = self.copy()
    result.extend(values)
    return result
def __iadd__(self, values):
    """In-place append of the contents of 'values' (like extend());
    returns self.
    """
    self.extend(values)
    return self
def __radd__(self, values):
    """Return 'values' (a list or tuple) extended with this message's
    arguments; the result has the same type as 'values'.
    """
    combined = list(values)
    combined.extend(self.values())
    if type(values) == types.TupleType:
        return tuple(combined)
    return combined
def _reencode(self, items):
    """Erase and rebuild the message payload from the given list of
    (typehint, value) tuples.
    """
    self.clearData()
    for entry in items:
        self.append(entry[1], entry[0])
def values(self):
    """Returns a list of the arguments appended so far.
    Decodes the binary form; elements 0 and 1 of the decoded list are
    the address and typetag-string, hence the [2:] slice.
    """
    return decodeOSC(self.getBinary())[2:]
def tags(self):
    """Return the typetags of the appended arguments, one single-character
    string per argument (the leading ',' is stripped).
    """
    return [tag for tag in self.typetags.lstrip(',')]
def items(self):
    """Return the appended arguments as a list of (typetag, value) tuples."""
    pairs = []
    tag_list = self.tags()
    for idx, value in enumerate(self.values()):
        pairs.append((tag_list[idx], value))
    return pairs
def __contains__(self, val):
    """Support 'val in msg': True when 'val' occurs among the arguments."""
    return self.values().count(val) > 0
def __getitem__(self, i):
    """Return the argument (or slice of arguments) at index 'i'."""
    args = self.values()
    return args[i]
def __delitem__(self, i):
    """Delete the argument (or slice) at index 'i' and re-encode
    the remaining arguments.
    """
    remaining = self.items()
    del remaining[i]
    self._reencode(remaining)
def _buildItemList(self, values, typehint=None):
    """Normalize 'values' into a list of (typehint, value) tuples.
    'values' may be an OSCMessage (its items are used), a list mixing
    plain values and (typehint, value) tuples, a single such tuple,
    or a single plain value.
    """
    if isinstance(values, OSCMessage):
        items = values.items()
    elif type(values) == types.ListType:
        items = []
        for val in values:
            if type(val) == types.TupleType:
                # already a (typehint, value) pair; ignore extra elements
                items.append(val[:2])
            else:
                items.append((typehint, val))
    elif type(values) == types.TupleType:
        items = [values[:2]]
    else:
        items = [(typehint, values)]
    return items
def __setitem__(self, i, val):
    """Set indicated argument (or slice) to a new value.
    'val' can be a single int/float/string, or a (typehint, value) tuple.
    Or, if 'i' is a slice, a list of these or another OSCMessage.
    """
    items = self.items()
    new_items = self._buildItemList(val)
    if type(i) != types.SliceType:
        if len(new_items) != 1:
            raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
        # single-index assignment gets the bare tuple, not a 1-element list
        new_items = new_items[0]
    # finally...
    items[i] = new_items
    self._reencode(items)
def setItem(self, i, val, typehint=None):
    """Replace argument 'i' with 'val', encoded using 'typehint'."""
    updated = self.items()
    updated[i] = (typehint, val)
    self._reencode(updated)
def copy(self):
    """Return a deep copy of this OSCMessage (same address, typetags
    and payload).
    """
    duplicate = self.__class__(self.address)
    duplicate.typetags = self.typetags
    duplicate.message = self.message
    return duplicate
def count(self, val):
    """Return how many of the message's arguments equal 'val'."""
    occurrences = 0
    for value in self.values():
        if value == val:
            occurrences += 1
    return occurrences
def index(self, val):
    """Return the index of the first argument equal to 'val'.
    Raises ValueError if 'val' is not found.
    """
    position = self.values().index(val)
    return position
def extend(self, values):
    """Append the contents of 'values' to this OSCMessage.
    'values' can be another OSCMessage, or a list/tuple of ints/floats/strings.
    """
    self._reencode(self.items() + self._buildItemList(values))
def insert(self, i, val, typehint=None):
    """Insert the given value (with optional typehint) into the
    OSCMessage at index 'i'.
    """
    updated = self.items()
    # insert in reverse so multiple values keep their original order
    for entry in reversed(self._buildItemList(val)):
        updated.insert(i, entry)
    self._reencode(updated)
def popitem(self, i):
    """Remove argument 'i' from the message and return it as a
    (typetag, value) tuple.
    """
    remaining = self.items()
    removed = remaining.pop(i)
    self._reencode(remaining)
    return removed
def pop(self, i):
    """Remove argument 'i' from the message and return its value."""
    tag_and_value = self.popitem(i)
    return tag_and_value[1]
def reverse(self):
    """Reverse the order of the message's arguments, in place."""
    reordered = self.items()
    reordered.reverse()
    self._reencode(reordered)
def remove(self, val):
    """Removes the first argument with the given value from the OSCMessage.
    Raises ValueError if val isn't found.
    """
    items = self.items()
    # a single scan over the items is cheaper than calling
    # self.values().index(val) and then self.items() (which would
    # decode the binary payload twice)
    for i, (tag, value) in enumerate(items):
        if value == val:
            del items[i]
            self._reencode(items)
            return
    # FIX: this previously referenced an undefined name 'm', raising
    # NameError instead of the intended ValueError
    raise ValueError("'%s' not in OSCMessage" % str(val))
def __iter__(self):
    """Iterate over the message's argument values."""
    return iter(self.values())
def __reversed__(self):
    """Iterate over the message's argument values, last to first."""
    return reversed(self.values())
def itervalues(self):
    """Iterate over the message's argument values (alias for __iter__)."""
    return iter(self.values())
def iteritems(self):
    """Iterate over the message's arguments as (typetag, value) tuples."""
    return iter(self.items())
def itertags(self):
    """Iterate over the typetags of the message's arguments."""
    return iter(self.tags())
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.
    OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
    An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
    (And yes, OSC-bundles may contain other OSC-bundles...)
    OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
      - if an item or items to be appended or inserted are not OSCMessage objects,
        OSCMessage objects are created to encapsulate the item(s)
      - an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
        The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
      - OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
        The default timetag value (0) means 'immediately'.
    """
    def __init__(self, address="", time=0):
        """Instantiate a new OSCBundle.
        The default OSC-address for newly created OSCMessages
        can be specified with the 'address' argument.
        The bundle's timetag can be set with the 'time' argument.
        """
        super(OSCBundle, self).__init__(address)
        self.timetag = time

    def __str__(self):
        """Returns the Bundle's contents (and timetag, if nonzero) as a string.
        """
        if (self.timetag > 0.):
            out = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            out = "#bundle ["
        if self.__len__():
            for val in self.values():
                out += "%s, " % str(val)
            out = out[:-2]  # strip trailing space and comma
        return out + "]"

    def setTimeTag(self, time):
        """Set or change the OSCBundle's TimeTag.
        In 'Python Time', that's floating seconds since the Epoch.
        Negative values are silently ignored.
        """
        if time >= 0:
            self.timetag = time

    def getTimeTagStr(self):
        """Return the TimeTag as a human-readable 'HH:MM:SS.mmm' string.
        """
        fract, secs = math.modf(self.timetag)
        out = time.ctime(secs)[11:19]  # the 'HH:MM:SS' part of ctime()
        out += ("%.3f" % fract)[1:]    # append '.mmm', dropping the leading '0'
        return out

    def append(self, argument, typehint = None):
        """Appends data to the bundle, creating an OSCMessage to encapsulate
        the provided argument unless this is already an OSCMessage.
        Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
        Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
          - if 'addr' appears in the dict, its value overrides the OSCBundle's address
          - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
        """
        if isinstance(argument, OSCMessage):
            binary = OSCBlob(argument.getBinary())
        else:
            # wrap the plain value(s) in a new OSCMessage carrying our address
            msg = OSCMessage(self.address)
            if type(argument) == types.DictType:
                if 'addr' in argument:
                    msg.setAddress(argument['addr'])
                if 'args' in argument:
                    msg.append(argument['args'], typehint)
            else:
                msg.append(argument, typehint)
            binary = OSCBlob(msg.getBinary())
        # bundle elements are stored as blobs (size-prefixed binary)
        self.message += binary
        self.typetags += 'b'

    def getBinary(self):
        """Returns the binary representation of the bundle:
        the '#bundle' marker, the timetag, then the encapsulated elements.
        """
        binary = OSCString("#bundle")
        binary += OSCTimeTag(self.timetag)
        binary += self.message
        return binary

    def _reencapsulate(self, decoded):
        # Rebuild an OSCMessage (or nested OSCBundle) object from a
        # decodeOSC()-style list.
        if decoded[0] == "#bundle":
            msg = OSCBundle()
            msg.setTimeTag(decoded[1])
            for submsg in decoded[2:]:
                msg.append(self._reencapsulate(submsg))
        else:
            msg = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                msg.append(decoded[2+i], tags[i])
        return msg

    def values(self):
        """Returns a list of the OSCMessages appended so far
        """
        out = []
        for decoded in decodeOSC(self.getBinary())[2:]:
            out.append(self._reencapsulate(decoded))
        return out

    def __eq__(self, other):
        """Return True if two OSCBundles have the same timetag & content
        """
        if not isinstance(other, self.__class__):
            return False
        return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)

    def copy(self):
        """Returns a deep copy of this OSCBundle
        """
        copy = super(OSCBundle, self).copy()
        copy.timetag = self.timetag
        return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.
    The length of the resulting string is always a multiple of 4 bytes;
    it ends with 1 to 4 NUL bytes.
    """
    # room for the string plus at least one terminating NUL, rounded up to x4
    padded = math.ceil((len(next) + 1) / 4.0) * 4
    return struct.pack(">%ds" % (padded), str(next))
def OSCBlob(next):
    """Convert a string into an OSC Blob.
    An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
    The blob ends with 0 to 3 zero-bytes ('\x00').
    Non-string input yields an empty result.
    """
    if type(next) in types.StringTypes:
        # NOTE(review): the size field stores the PADDED length; _readBlob in
        # this file expects that, but the OSC spec puts the unpadded data
        # length here -- verify interoperability with other implementations.
        OSCblobLength = math.ceil((len(next)) / 4.0) * 4
        binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
    else:
        binary = ""
    return binary
def OSCArgument(next, typehint=None):
    """ Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.
    Without a typehint the tag is chosen from the value's type
    (float -> 'f', int -> 'i', anything else -> 's'); with a typehint
    of 'd', 'f' or 'i' a conversion is attempted, falling back to a
    string ('s') when the value cannot be converted.
    """
    if not typehint:
        # no hint: infer the tag from the Python type
        if type(next) in FloatTypes:
            binary = struct.pack(">f", float(next))
            tag = 'f'
        elif type(next) in IntTypes:
            binary = struct.pack(">i", int(next))
            tag = 'i'
        else:
            binary = OSCString(next)
            tag = 's'
    elif typehint == 'd':
        try:
            binary = struct.pack(">d", float(next))
            tag = 'd'
        except ValueError:
            binary = OSCString(next)
            tag = 's'
    elif typehint == 'f':
        try:
            binary = struct.pack(">f", float(next))
            tag = 'f'
        except ValueError:
            binary = OSCString(next)
            tag = 's'
    elif typehint == 'i':
        try:
            binary = struct.pack(">i", int(next))
            tag = 'i'
        except ValueError:
            binary = OSCString(next)
            tag = 's'
    else:
        # unrecognized hint: encode as a string
        binary = OSCString(next)
        tag = 's'
    return (tag, binary)
def OSCTimeTag(time):
    """Convert a time in floating seconds (since the Unix Epoch) to its
    OSC binary representation: two big-endian uint32s (NTP seconds,
    NTP fractional units). A value <= 0 encodes the special
    'immediately' timetag (0x0000000000000001).
    """
    if time > 0:
        fract, secs = math.modf(time)
        secs = secs - NTP_epoch  # convert Unix-epoch seconds to NTP-epoch seconds
        binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
    else:
        binary = struct.pack('>LL', 0L, 1L)  # 'immediately'
    return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit big-endian integer.
    Returns (integer, remaining-data); on short input prints an error
    and returns (0, data) unchanged.
    """
    if(len(data)<4):
        print "Error: too few bytes for int", data, len(data)
        rest = data
        integer = 0
    else:
        integer = struct.unpack(">i", data[0:4])[0]
        rest = data[4:]
    return (integer, rest)
def _readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.
    Returns (integer, remaining-data).
    """
    # combine the two signed 32-bit words into one 64-bit value
    high, low = struct.unpack(">ll", data[0:8])
    big = (long(high) << 32) + low
    rest = data[8:]
    return (big, rest)
def _readTimeTag(data):
    """Tries to interpret the next 8 bytes of the data
    as an OSC TimeTag (two big-endian uint32s: NTP seconds, fraction).
    Returns (time-in-floating-seconds-since-the-Unix-Epoch, remaining-data);
    the special 'immediately' tag decodes to 0.0.
    """
    high, low = struct.unpack(">LL", data[0:8])
    if (high == 0) and (low <= 1):
        time = 0.0  # the special 'immediately' timetag
    else:
        # FIX: 'float(low / NTP_units_per_second)' divided two integers
        # first (truncating the fraction to 0 under Python 2 integer
        # division) and only then converted; divide as floats instead.
        time = int(NTP_epoch + high) + float(low) / float(NTP_units_per_second)
    rest = data[8:]
    return (time, rest)
def _readFloat(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit big-endian float.
    Returns (float-value, remaining-data); on short input prints an
    error and returns (0, data) unchanged.
    NOTE(review): the local name 'float' shadows the builtin within
    this function.
    """
    if(len(data)<4):
        print "Error: too few bytes for float", data, len(data)
        rest = data
        float = 0
    else:
        float = struct.unpack(">f", data[0:4])[0]
        rest = data[4:]
    return (float, rest)
def _readDouble(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit big-endian float.
    Returns (float-value, remaining-data); on short input prints an
    error and returns (0, data) unchanged.
    NOTE(review): the local name 'float' shadows the builtin within
    this function.
    """
    if(len(data)<8):
        print "Error: too few bytes for double", data, len(data)
        rest = data
        float = 0
    else:
        float = struct.unpack(">d", data[0:8])[0]
        rest = data[8:]
    return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message to a Python list.
    A plain message decodes to [address, typetags, arg1, arg2, ...];
    a bundle decodes to ["#bundle", timetag, <decoded-element>, ...]
    with the elements decoded recursively.
    """
    # one reader function per supported typetag
    table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
    decoded = []
    address, rest = _readString(data)
    if address.startswith(","):
        # address-less message: what we read was really the typetag-string
        typetags = address
        address = ""
    else:
        typetags = ""
    if address == "#bundle":
        # bundle: a timetag followed by size-prefixed, recursively decoded elements
        time, rest = _readTimeTag(rest)
        decoded.append(address)
        decoded.append(time)
        while len(rest)>0:
            length, rest = _readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest)>0:
        if not len(typetags):
            typetags, rest = _readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags.startswith(","):
            # decode each argument with the reader matching its typetag
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")
    return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
    """ Useful utility; prints the string in hexadecimal.
    Prints 16 bytes per row (offset, hex values, then the row's repr),
    plus a final partial row for any remaining bytes.
    NOTE(review): the parameter name 'bytes' shadows the builtin within
    this function.
    """
    print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
    num = len(bytes)
    for i in range(num):
        if (i) % 16 == 0:
            # start a new row with its offset label
            line = "%02X0 : " % (i/16)
        line += "%02X " % ord(bytes[i])
        if (i+1) % 16 == 0:
            print "%s: %s" % (line, repr(bytes[i-15:i+1]))
            line = ""
    # print the trailing partial row, if any
    bytes_left = num % 16
    if bytes_left:
        print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
    """Convert provided arguments to a string in 'host:port/prefix' format
    Args can be:
      - (host, port)
      - (host, port), prefix
      - host, port
      - host, port, prefix
    Returns "" when called without arguments.
    """
    if not len(args):
        return ""
    if type(args[0]) == types.TupleType:
        host = args[0][0]
        port = args[0][1]
        args = args[1:]
    else:
        host = args[0]
        port = args[1]
        args = args[2:]
    if len(args):
        prefix = args[0]
    else:
        prefix = ""
    # try to replace a numeric address with its hostname (best-effort)
    if len(host) and (host != '0.0.0.0'):
        try:
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        host = 'localhost'
    if type(port) == types.IntType:
        return "%s:%d%s" % (host, port, prefix)
    else:
        # no (integer) port known: omit the ':port' part
        return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to it's components
    Returns ((host, port), prefix); (None, '') for empty/non-string input.
    """
    if not (type(url) in types.StringTypes and len(url)):
        return (None, '')
    # strip a leading 'scheme://', if present
    i = url.find("://")
    if i > -1:
        url = url[i+3:]
    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url
    # scan the leading digits of 'tail' as the port number;
    # the for/else bumps 'i' past the end when ALL chars are digits.
    # NOTE(review): if 'tail' is empty, 'i' is never bound and the
    # slice below raises NameError -- confirm callers never pass
    # a URL ending in ':'.
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        i += 1
    portstr = tail[:i].strip()
    tail = tail[i:].strip()
    # the prefix ends at the first filter-operator ('+', '-', '*') or runs
    # to the end of the string
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i
    head = tail[:found].strip()
    prefix = tail[found:].strip()
    prefix = prefix.strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix
    # without a ':' the text before the prefix is the host
    if len(head) and not len(host):
        host = head
    if len(host):
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
    try:
        port = int(portstr)
    except ValueError:
        port = None
    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
sockEqual = cmp(self.socket._sock, other.socket._sock)
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
return cmp(self.server, other.server)
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error, e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    out = {}
    if type(args) in types.StringTypes:
        args = [args]
    prefix = None
    for arg in args:
        head = None
        # split on '+' first; each '+'-chunk may itself contain '-'-filters
        for plus in arg.split('+'):
            minus = plus.split('-')
            plusfs = minus.pop(0).strip()
            if len(plusfs):
                plusfs = '/' + plusfs.strip('/')
            # the first non-'/*' item of the first arg is the prefix,
            # not a filter
            if (head == None) and (plusfs != "/*"):
                head = plusfs
            elif len(plusfs):
                if plusfs == '/*':
                    out = { '/*':True }  # reset all previous filters
                else:
                    out[plusfs] = True
            for minusfs in minus:
                minusfs = minusfs.strip()
                if len(minusfs):
                    minusfs = '/' + minusfs.strip('/')
                    if minusfs == '/*':
                        out = { '/*':False }  # reset all previous filters
                    else:
                        out[minusfs] = False
        if prefix == None:
            prefix = head
    return [prefix, out]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings.
    The first entry is '+/*' or '-/*', stating the default policy.
    """
    if not len(filters):
        return []
    # determine the default: explicit '/*' entry wins; otherwise the
    # presence of any deny-filter implies 'allow by default'
    if '/*' in filters.keys():
        allow_all = filters['/*']
    else:
        allow_all = False in filters.values()
    if allow_all:
        out = ["+/*"]
    else:
        out = ["-/*"]
    for (addr, enabled) in filters.items():
        if addr == '/*':
            continue
        if enabled:
            out.append("+%s" % addr)
        else:
            out.append("-%s" % addr)
    return out
# A translation-table for mapping OSC-address expressions to Python 're'
# expressions: '{' -> '(', ',' -> '|', '}' -> ')', '?' -> '.'
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
    """Compile and return a regular-expression object for the given
    OSC address-pattern.
    """
    # escape the regex metacharacters that are literal in OSC addresses
    for literal, escaped in ((".", r"\."), ("(", r"\("), (")", r"\)")):
        pattern = pattern.replace(literal, escaped)
    pattern = pattern.replace("*", r".*")  # OSC '*' matches any run of chars
    pattern = pattern.translate(OSCtrans)  # '?' -> '.' and '{a,b}' -> '(a|b)'
    return re.compile(pattern)
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != types.DictType:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
    """Return a copy of the given OSCMessage whose OSC-address starts with
    the given prefix.
    If 'msg' is an OSCBundle, the prefix is prepended recursively to the
    address of every constituent message.
    Raises TypeError for anything that is not an OSCMessage or OSCBundle.
    """
    duplicate = msg.copy()

    if isinstance(msg, OSCBundle):
        contents = duplicate.values()
        duplicate.clearData()
        for child in contents:
            duplicate.append(self._prefixAddress(prefix, child))
    elif isinstance(msg, OSCMessage):
        duplicate.setAddress(prefix + duplicate.address)
    else:
        raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")

    return duplicate
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
    """Maps OSC-addresses to callback functions and dispatches incoming
    messages to the callback(s) whose registered address matches.
    """
    def __init__(self):
        # registered OSC-address (string) -> callback function
        self.callbacks = {}

    def addMsgHandler(self, address, callback):
        """Register a handler for an OSC-address
        - 'address' is the OSC address-string.
        the address-string should start with '/' and may not contain '*'
        - 'callback' is the function called for incoming OSCMessages that match 'address'.
        The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
        """
        for chk in '*?,[]{}# ':
            if chk in address:
                raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")

        if type(callback) not in (types.FunctionType, types.MethodType):
            raise OSCServerError("Message callback '%s' is not callable" % repr(callback))

        # normalize to a single leading '/' ('default' is a special catch-all key)
        if address != 'default':
            address = '/' + address.strip('/')

        self.callbacks[address] = callback

    def delMsgHandler(self, address):
        """Remove the registered handler for the given OSC-address
        """
        del self.callbacks[address]

    def getOSCAddressSpace(self):
        """Returns a list containing all OSC-addresses registered with this Server.
        """
        return self.callbacks.keys()

    def dispatchMessage(self, pattern, tags, data, client_address):
        """Attempt to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.

        - pattern (string): The OSC-address of the received message
        - tags (string): The OSC-typetags of the received message's arguments, without ','
        - data (list): The message arguments
        Returns a list of the OSCMessage replies produced by the callback(s).
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))

        expr = getRegEx(pattern)

        replies = []
        matched = 0
        for addr in self.callbacks.keys():
            match = expr.match(addr)
            # only accept a match consuming the whole registered address
            if match and (match.end() == len(addr)):
                reply = self.callbacks[addr](pattern, tags, data, client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    # Bugfix: report the offending callback via self.callbacks;
                    # this class has no 'server' attribute, so the previous
                    # 'self.server.callbacks' raised AttributeError instead.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks[addr], type(reply)))

        if matched == 0:
            if 'default' in self.callbacks:
                reply = self.callbacks['default'](pattern, tags, data, client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    # Bugfix: same 'self.server.callbacks' -> 'self.callbacks' fix as above.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)

        return replies
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer
    """
    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # plain message: dispatch it and collect any callback replies
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag: wait until its scheduled time
        # before dispatching the contained elements
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)

        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return

        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        if self.server.return_port:
            # reply to the server's configured return-port instead of the
            # packet's source port
            self.client_address = (self.client_address[0], self.server.return_port)

        # multiple replies are wrapped in a single OSCBundle
        if len(self.replies) > 1:
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            return

        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
        """
        if decoded[0] != "#bundle":
            # plain message: dispatch in the current thread
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag before unpacking its contents
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        now = time.time()

        children = []

        # unpack each bundle-element in its own thread
        for msg in decoded[2:]:
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)

        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-adresses
that have been registered to the server with a callback-function.
If the adress-pattern of the message machtes the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock)
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix' -> serverInfo_handler
- '<prefix><error_prefix> -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
raise e # bh
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt) # strip trailing comma & space
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retreive <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError, e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server forks a new process to handle each incoming request.
    """
    # override OSCServer's handler: the threading handler additionally
    # unpacks nested OSC-bundles in parallel threads
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server starts a new thread to handle each incoming request.
    """
    # override OSCServer's handler: the threading handler additionally
    # unpacks nested OSC-bundles in parallel threads
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Root of the OSC exception hierarchy; all OSC-related errors derive
    from this class.
    """
    def __init__(self, message):
        # keep the text on the instance so subclasses and str() can use it
        self.message = message

    def __str__(self):
        return self.message
class OSCClientError(OSCError):
    """Class for all OSCClient errors
    (e.g. send-timeouts and transmission failures).
    """
    pass
class OSCServerError(OSCError):
    """Class for all OSCServer errors
    (e.g. invalid handler registrations and malformed messages).
    """
    pass
class NoCallbackError(OSCServerError):
    """This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
    is received, and no 'default' handler is registered.
    """
    def __init__(self, pattern):
        """The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
        """
        # NOTE(review): does not call OSCError.__init__; sets .message directly,
        # which OSCError.__str__ reads.
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
    that isn't subscribed.
    """
    def __init__(self, addr, prefix=None):
        # an absent prefix yields a plain '<host>:<port>' URL
        url = getUrlStr(addr, prefix if prefix else '')
        self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates a OSCStreamRequestHandler
for each new connection. This is fundamentally different to a packet
oriented server which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
for each single connection, because usually tcp server spawn a new thread
or process for each new connection. This would generate severe
multithreading synchronization problems when each thread would operate on
the same address space object. Therefore: To implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print "SERVER: New client connection."
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print "SERVER: Client connection handled."
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error, e:
if e[0] == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end should be closed in the meanwhile None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receive(4)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print "SERVER: Entered server loop"
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print "OSC stream server: Spurious message received."
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error, e:
if e[0] == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty fullfilled
print "SERVER: Connection has been reset by peer."
else:
raise e
def sendOSC(self, oscData):
    """Transmit an OSC message or bundle over the client/server
    connection. It is thread safe.

    Returns the result of _transmitMsg (True on success).
    """
    # Release the lock in a finally clause so an exception raised by
    # _transmitMsg cannot leave the mutex held — the original code would
    # deadlock every subsequent sender in that case.
    self._txMutex.acquire()
    try:
        result = self._transmitMsg(oscData)
    finally:
        self._txMutex.release()
    return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future).
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
    """ A connection oriented (TCP/IP) OSC server.
    """
    # define a socket timeout, so the serve_forever loop can actually exit.
    # with 2.6 and server.shutdown this wouldn't be necessary
    socket_timeout = 1

    # this is the class which handles a new connection. Override this for a
    # useful customized server. See the testbench for an example
    RequestHandlerClass = OSCStreamRequestHandler

    def __init__(self, address):
        """Instantiate an OSCStreamingServer.
          - address ((host, port) tuple): the local host & port
            the server listens on for new connections.
        """
        self._clientList = []
        self._clientListMutex = threading.Lock()
        TCPServer.__init__(self, address, self.RequestHandlerClass)
        self.socket.settimeout(self.socket_timeout)

    def serve_forever(self):
        """Handle one request at a time until the server is stopped.
        Had to add this since 2.5 does not support server.shutdown()
        """
        self.running = True
        while self.running:
            self.handle_request()  # this times-out when no data arrives.

    def start(self):
        """ Start the server thread. """
        self._server_thread = threading.Thread(target=self.serve_forever)
        self._server_thread.setDaemon(True)
        self._server_thread.start()

    def stop(self):
        """ Stop the server thread and close the socket. """
        self.running = False
        self._server_thread.join()
        self.server_close()
        # 2.6 only
        #self.shutdown()

    def _clientRegister(self, client):
        """ Gets called by each request/connection handler when the
        connection is established, to add the handler to the client list.
        """
        self._clientListMutex.acquire()
        try:
            self._clientList.append(client)
        finally:
            self._clientListMutex.release()

    def _clientUnregister(self, client):
        """ Gets called by each request/connection handler when the
        connection is lost, to remove the handler from the client list.
        """
        self._clientListMutex.acquire()
        try:
            self._clientList.remove(client)
        finally:
            self._clientListMutex.release()

    def broadcastToClients(self, oscData):
        """ Send an OSC message or bundle to all connected clients.
        Returns True only if transmission succeeded for every client.
        """
        # Snapshot the client list under the mutex so concurrent
        # register/unregister calls cannot mutate it while we iterate.
        self._clientListMutex.acquire()
        try:
            clients = list(self._clientList)
        finally:
            self._clientListMutex.release()
        result = True
        for client in clients:
            # Call sendOSC first: the original "result and client.sendOSC(...)"
            # short-circuited after the first failure, silently skipping the
            # remaining clients.
            result = client.sendOSC(oscData) and result
        return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
    """ Implements a server which spawns a separate thread for each incoming
    connection. Care must be taken since the OSC address space is the same
    for all connections.
    """
    # All behaviour comes from the mix-in and the base class. Note: in the
    # original code the docstring followed a "pass" statement, so it was a
    # dead string expression and never attached as the class __doc__.
    pass
class OSCStreamingClient(OSCAddressSpace):
    """ OSC streaming client.
    A streaming client establishes a connection to a streaming server but must
    be able to handle replies by the server as well. To accomplish this the
    receiving takes place in a secondary thread, because no one knows if we
    have to expect a reply or not, i.e. synchronous architecture doesn't make
    much sense.
    Replies will be matched against the local address space. If message
    handlers access code of the main thread (where the client messages are sent
    to the server) care must be taken e.g. by installing synchronization
    mechanisms or by using an event dispatcher which can handle events
    originating from other threads.
    """
    # outgoing / incoming socket buffer sizes (bytes)
    sndbuf_size = 4096 * 8
    rcvbuf_size = 4096 * 8

    def __init__(self):
        # serializes transmissions between the main thread and the
        # receiving thread (both may call _transmitMsgWithTimeout)
        self._txMutex = threading.Lock()
        OSCAddressSpace.__init__(self)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
        # short timeout so blocking socket calls wake up periodically and
        # can observe a termination request (see _receiveWithTimeout)
        self.socket.settimeout(1.0)
        self._running = False

    def _receiveWithTimeout(self, count):
        """Read exactly 'count' bytes from the socket.

        Returns the received bytes, or None when the connection is lost,
        reset by the peer, or termination was requested while waiting.
        """
        chunk = str()
        while len(chunk) < count:
            try:
                tmp = self.socket.recv(count - len(chunk))
            except socket.timeout:
                # the 1 s socket timeout lets us poll the _running flag so
                # close() can interrupt a blocking receive
                if not self._running:
                    print "CLIENT: Socket timed out and termination requested."
                    return None
                else:
                    continue
            except socket.error, e:
                if e[0] == errno.ECONNRESET:
                    print "CLIENT: Connection reset by peer."
                    return None
                else:
                    raise e
            if not tmp or len(tmp) == 0:
                # recv returning empty data means the peer closed the socket
                print "CLIENT: Socket has been closed."
                return None
            chunk = chunk + tmp
        return chunk

    def _receiveMsgWithTimeout(self):
        """ Receive OSC message from a socket and decode.
        If an error occurs, None is returned, else the message.
        """
        # get OSC packet size from stream which is prepended each transmission
        chunk = self._receiveWithTimeout(4)
        if not chunk:
            return None
        # extract message length from big endian unsigned long (32 bit)
        slen = struct.unpack(">L", chunk)[0]
        # receive the actual message
        chunk = self._receiveWithTimeout(slen)
        if not chunk:
            return None
        # decode OSC content
        msg = decodeOSC(chunk)
        if msg == None:
            raise OSCError("CLIENT: Message decoding failed.")
        return msg

    def _receiving_thread_entry(self):
        """Secondary-thread loop: receive packets, dispatch them through the
        local address space, and transmit any replies the handlers produce.
        Exits when reception fails or a transmit fails.
        """
        print "CLIENT: Entered receiving thread."
        self._running = True
        while self._running:
            decoded = self._receiveMsgWithTimeout()
            if not decoded:
                break
            elif len(decoded) <= 0:
                # spurious/empty message: stay in sync and keep receiving
                continue
            # message handlers append their replies to self.replies
            self.replies = []
            self._unbundle(decoded)
            if len(self.replies) > 1:
                # several replies: wrap them into a single bundle
                msg = OSCBundle()
                for reply in self.replies:
                    msg.append(reply)
            elif len(self.replies) == 1:
                msg = self.replies[0]
            else:
                # nothing to send back
                continue
            # NOTE(review): if _transmitMsgWithTimeout raises, the mutex
            # stays held — consider try/finally here.
            self._txMutex.acquire()
            txOk = self._transmitMsgWithTimeout(msg)
            self._txMutex.release()
            if not txOk:
                break
        print "CLIENT: Receiving thread terminated."

    def _unbundle(self, decoded):
        """Recursively dispatch a decoded OSC message or "#bundle" list,
        honouring the bundle timetag by sleeping until it is due.
        """
        if decoded[0] != "#bundle":
            # plain message: decoded[0] is the address, decoded[1][1:] the
            # typetag string with the leading "," stripped, decoded[2:] the
            # arguments
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
            return
        now = time.time()
        timetag = decoded[1]
        # a positive timetag in the future delays delivery of the bundle
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def connect(self, address):
        """Connect to the server at 'address' ((host, port) tuple) and
        start the receiving thread."""
        self.socket.connect(address)
        self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
        self.receiving_thread.start()

    def close(self):
        """Request termination of the receiving thread, wait for it, then
        close the socket."""
        # let socket time out
        self._running = False
        self.receiving_thread.join()
        self.socket.close()

    def _transmitWithTimeout(self, data):
        """Send all of 'data' over the socket, retrying on timeouts while
        still running.

        Returns True on success, False when the connection is lost, reset,
        or termination was requested.
        """
        sent = 0
        while sent < len(data):
            try:
                tmp = self.socket.send(data[sent:])
            except socket.timeout:
                if not self._running:
                    print "CLIENT: Socket timed out and termination requested."
                    return False
                else:
                    continue
            except socket.error, e:
                if e[0] == errno.ECONNRESET:
                    print "CLIENT: Connection reset by peer."
                    return False
                else:
                    raise e
            if tmp == 0:
                # send() returning 0 indicates the peer is gone
                return False
            sent += tmp
        return True

    def _transmitMsgWithTimeout(self, msg):
        """Serialize 'msg' and send it with a 4-byte big-endian length
        prefix. Returns True on success.

        Raises TypeError when 'msg' is not an OSCMessage/OSCBundle.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        binary = msg.getBinary()
        length = len(binary)
        # prepend length of packet before the actual message (big endian)
        len_big_endian = array.array('c', '\0' * 4)
        struct.pack_into(">L", len_big_endian, 0, length)
        len_big_endian = len_big_endian.tostring()
        if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
            return True
        else:
            return False

    def sendOSC(self, msg):
        """Send an OSC message or bundle to the server. Returns True on success.
        """
        # NOTE(review): if _transmitMsgWithTimeout raises, the mutex stays
        # held and all later senders deadlock — consider try/finally.
        self._txMutex.acquire()
        txOk = self._transmitMsgWithTimeout(msg)
        self._txMutex.release()
        return txOk

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.socket.getpeername()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Compare function.
        """
        # NOTE(review): cmp() returns 0 for equal objects, so "isequal" is
        # truthy exactly when the sockets are NOT equal — the boolean sense
        # here looks inverted (apparently inherited from the datagram
        # client). "self.server" is also never set by this class; confirm
        # before relying on this comparison.
        if not isinstance(other, self.__class__):
            return False
        isequal = cmp(self.socket._sock, other.socket._sock)
        if isequal and self.server and other.server:
            return cmp(self.server, other.server)
        return isequal

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)
|
brianhouse/wavefarm
|
housepy/lib/OSC.py
|
Python
|
gpl-3.0
| 89,984
|
[
"VisIt"
] |
32b474b8b7b9dd5891273342fd45d9a47956b7ebbc4aac371a649fa365516f62
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import pickle
from . import dependency_check
from psi4.driver.molutil import *
from psi4.driver.inputparser import process_input
from psi4.driver.p4util.util import *
from psi4.driver.p4util.text import *
from psi4.driver.qmmm import QMMM
from psi4.driver.plugin import *
from psi4.driver import gaussian_n
from psi4.driver import aliases
from psi4.driver import diatomic
from psi4.driver import wrapper_database
from psi4.driver import wrapper_autofrag
from psi4.driver import json_wrapper
from psi4.driver.driver import *
# Single functions
from psi4.driver.driver_cbs import cbs
from psi4.driver.p4util.python_helpers import set_options, set_module_options, pcm_helper, basis_helper
|
rmcgibbo/psi4public
|
psi4/driver/__init__.py
|
Python
|
lgpl-3.0
| 1,603
|
[
"Psi4"
] |
57592990df5da2d78c5b8abda04ef758ac894f53747426f5a40cd3cf09ef33e5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.