| text: stringlengths 12-1.05M | repo_name: stringlengths 5-86 | path: stringlengths 4-191 | language: stringclasses 1 value | license: stringclasses 15 values | size: int32 12-1.05M | keyword: listlengths 1-23 | text_hash: stringlengths 64-64 |
|---|---|---|---|---|---|---|---|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from os import unlink
from glob import glob
class PyDpGpCluster(PythonPackage):
"""DP_GP_cluster clusters genes by expression over a time course using a
Dirichlet process Gaussian process model."""
homepage = "https://github.com/PrincetonUniversity/DP_GP_cluster"
git = "https://github.com/PrincetonUniversity/DP_GP_cluster.git"
version('2019-09-22', commit='eec12e74219f916aa86e253783905f7b5e30f6f4')
depends_on('python@2.7:2.8', type=('build', 'run'))
depends_on('py-cython', type='build')
depends_on('py-gpy@0.8.8:0.9.9', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy@0.14:', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
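# Remove any shipped Cython-generated C sources before building so they are regenerated by the py-cython build dependency.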
@run_before('build')
def remove_cython_output(self):
for f in glob('DP_GP/*.c'):
unlink(f)
| iulian787/spack | var/spack/repos/builtin/packages/py-dp-gp-cluster/package.py | Python | lgpl-2.1 | 1,201 | ["Gaussian"] | 5742c990f691d50384020a423e66a4712a82977ff539d094e1cd3beff1b20d08 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
"""
Display a person's siblings in a report window
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gui.plug.quick import QuickTable
from gramps.gen.relationship import get_relationship_calculator
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
def run(database, document, person):
"""
Loops through the families in which the person is a child, and displays
the information about the other children.
"""
# setup the simple access functions
sdb = SimpleAccess(database)
sdoc = SimpleDoc(document)
stab = QuickTable(sdb)
rel_class = get_relationship_calculator(glocale)
# display the title
# feature request 2356: avoid genitive form
sdoc.title(_("Siblings of %s") % sdb.name(person))
sdoc.paragraph("")
stab.columns(_("Sibling"), _("Gender"), _("Birth Date"), _("Type"))
# grab our current id (self):
gid = sdb.gid(person)
# loop through each family in which the person is a child
document.has_data = False
for family in sdb.child_in(person):
# loop through each child in the family
for child in sdb.children(family):
# only display if this child is not the active person
if sdb.gid(child) != gid:
rel_str = rel_class.get_sibling_relationship_string(
rel_class.get_sibling_type(database, person, child),
person.get_gender(), child.get_gender())
else:
rel_str = _('self')
# pass row the child object to make link:
stab.row(child,
sdb.gender(child),
sdb.birth_or_fallback(child),
rel_str)
document.has_data = True
stab.write(sdoc)
| pmghalvorsen/gramps_branch | gramps/plugins/quickview/siblings.py | Python | gpl-2.0 | 2,650 | ["Brian"] | 017da32bd127ca4ad969ab5dc4fa55f2bfefba047494a6ac3b2896e1414e34d5 |
#!/usr/bin/env python
"""
Rotations, VTK Textbook figure 3-31b.
Note: Make sure Rotations.py is in the same directory as this program.
"""
import Rotations
def main():
file_name, figure, book_color = Rotations.get_program_parameters()
# Set up for six rotations about the y-axis.
figure = 2
book_color = True
Rotations.rotate(file_name, figure, book_color)
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/Rendering/RotationsB.py | Python | apache-2.0 | 422 | ["VTK"] | 5921ba87eec420583d0941a9bbf220c20afe7c6c8bfde1b6e462db5e157207c4 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute FAD between two multivariate Gaussian."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from frechet_audio_distance import fad_utils
flags.DEFINE_string("background_stats", None,
"Tf record containing the background stats (mu sigma).")
flags.DEFINE_string("test_stats", None,
"Tf record containing the test stats (mu sigma).")
flags.mark_flags_as_required(["background_stats", "test_stats"])
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
mu_bg, sigma_bg = fad_utils.read_mean_and_covariances(FLAGS.background_stats)
mu_test, sigma_test = fad_utils.read_mean_and_covariances(FLAGS.test_stats)
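# frechet_distance computes the closed-form Frechet distance between the two Gaussians:
# ||mu_bg - mu_test||^2 + Tr(sigma_bg + sigma_test - 2*(sigma_bg @ sigma_test)^(1/2))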
fad = fad_utils.frechet_distance(mu_bg, sigma_bg, mu_test, sigma_test)
print("FAD: %f" % fad)
if __name__ == "__main__":
app.run(main)
| google-research/google-research | frechet_audio_distance/compute_fad.py | Python | apache-2.0 | 1,525 | ["Gaussian"] | cb8256f0690fa573a1dc691c867ef10c10cfac742095613c49ec79201b71cb06 |
########################################################################
# $HeadURL$
# File: RequestTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/07/24 10:23:40
########################################################################
""" :mod: RequestTests
=======================
.. module: RequestTests
:synopsis: test cases for Request class
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
test cases for Request class
"""
__RCSID__ = "$Id$"
# #
# @file RequestTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/07/24 10:23:52
# @brief Definition of RequestTests class.
# # imports
import unittest
import datetime
# # from DIRAC
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
# # SUT
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.ReqClient import printRequest
def optimizeRequest( req, printOutput = None ):
from DIRAC import gLogger
if printOutput:
if isinstance( printOutput, basestring ):
gLogger.always( 'Request %s:' % printOutput )
printRequest( req )
gLogger.always( '=========== Optimized ===============' )
res = req.optimize()
if printOutput:
printRequest( req )
gLogger.always( '' )
return res
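# Helper: builds a Request whose operation layout depends on reqType (0-6); the layouts correspond to the 'title' entries in test_08Optimize below.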
def createRequest( reqType ):
r = Request()
# Simple failover
op1 = Operation()
f = File()
f.LFN = '/This/is/an/LFN'
op1.addFile( f )
op1.Type = 'ReplicateAndRegister'
op1.SourceSE = 'CERN-FAILOVER'
op1.TargetSE = 'CERN-BUFFER'
r.addOperation( op1 )
op2 = Operation()
op2.addFile( f )
op2.Type = 'RemoveReplica'
op2.TargetSE = 'CERN-FAILOVER'
r.addOperation( op2 )
if reqType == 0:
return r
# two files for Failover
f1 = File()
f1.LFN = '/This/is/a/second/LFN'
op3 = Operation()
op3.addFile( f1 )
op3.Type = 'ReplicateAndRegister'
op3.SourceSE = 'CERN-FAILOVER'
op3.TargetSE = 'CERN-BUFFER'
r.addOperation( op3 )
op3 = Operation()
op3.addFile( f1 )
op3.Type = 'RemoveReplica'
op3.TargetSE = 'CERN-FAILOVER'
r.addOperation( op3 )
if reqType == 1:
return r
op = Operation()
op.Type = 'ForwardDiset'
if reqType == 2:
r.addOperation( op )
return r
r.insertBefore( op, r[0] )
if reqType == 3:
return r
op4 = Operation()
op4.Type = 'ForwardDiset'
r.addOperation( op4 )
if reqType == 4:
return r
# 2 different FAILOVER SEs: removal not optimized
r[1].SourceSE = 'RAL-FAILOVER'
r[2].SourceSE = 'RAL-FAILOVER'
if reqType == 5:
return r
# 2 different destinations, same FAILOVER: replication not optimized
r[3].SourceSE = 'RAL-FAILOVER'
r[4].SourceSE = 'RAL-FAILOVER'
r[3].TargetSE = 'RAL-BUFFER'
if reqType == 6:
return r
print 'This should not happen, reqType =', reqType
########################################################################
class RequestTests( unittest.TestCase ):
"""
.. class:: RequestTests
"""
def setUp( self ):
""" set up """
self.fromDict = { "RequestName" : "test", "JobID" : 12345 }
def tearDown( self ):
""" tear down """
del self.fromDict
def test_01CtorSerilization( self ):
""" c'tor and serialization """
# # empty c'tor
req = Request()
self.assertEqual( isinstance( req, Request ), True )
self.assertEqual( req.JobID, 0 )
self.assertEqual( req.Status, "Waiting" )
req = Request( self.fromDict )
self.assertEqual( isinstance( req, Request ), True )
self.assertEqual( req.RequestName, "test" )
self.assertEqual( req.JobID, 12345 )
self.assertEqual( req.Status, "Waiting" )
toJSON = req.toJSON()
self.assertEqual( toJSON["OK"], True, "JSON serialization failed" )
fromJSON = toJSON["Value"]
req = Request( fromJSON )
def test_02Props( self ):
""" props """
# # valid values
req = Request()
req.RequestID = 1
self.assertEqual( req.RequestID, 1 )
req.RequestName = "test"
self.assertEqual( req.RequestName, "test" )
req.JobID = 1
self.assertEqual( req.JobID, 1 )
req.CreationTime = "1970-01-01 00:00:00"
self.assertEqual( req.CreationTime, datetime.datetime( 1970, 1, 1, 0, 0, 0 ) )
req.CreationTime = datetime.datetime( 1970, 1, 1, 0, 0, 0 )
self.assertEqual( req.CreationTime, datetime.datetime( 1970, 1, 1, 0, 0, 0 ) )
req.SubmitTime = "1970-01-01 00:00:00"
self.assertEqual( req.SubmitTime, datetime.datetime( 1970, 1, 1, 0, 0, 0 ) )
req.SubmitTime = datetime.datetime( 1970, 1, 1, 0, 0, 0 )
self.assertEqual( req.SubmitTime, datetime.datetime( 1970, 1, 1, 0, 0, 0 ) )
req.LastUpdate = "1970-01-01 00:00:00"
self.assertEqual( req.LastUpdate, datetime.datetime( 1970, 1, 1, 0, 0, 0 ) )
req.LastUpdate = datetime.datetime( 1970, 1, 1, 0, 0, 0 )
self.assertEqual( req.LastUpdate, datetime.datetime( 1970, 1, 1, 0, 0, 0 ) )
req.Error = ""
def test_04Operations( self ):
""" operations arithmetic and state machine """
req = Request()
self.assertEqual( len( req ), 0 )
transfer = Operation()
transfer.Type = "ReplicateAndRegister"
transfer.addFile( File( { "LFN" : "/a/b/c", "Status" : "Waiting" } ) )
getWaiting = req.getWaiting()
self.assertEqual( getWaiting["OK"], True )
self.assertEqual( getWaiting["Value"], None )
req.addOperation( transfer )
self.assertEqual( len( req ), 1 )
self.assertEqual( transfer.Order, req.Order )
self.assertEqual( transfer.Status, "Waiting" )
getWaiting = req.getWaiting()
self.assertEqual( getWaiting["OK"], True )
self.assertEqual( getWaiting["Value"], transfer )
removal = Operation( { "Type" : "RemoveFile" } )
removal.addFile( File( { "LFN" : "/a/b/c", "Status" : "Waiting" } ) )
req.insertBefore( removal, transfer )
getWaiting = req.getWaiting()
self.assertEqual( getWaiting["OK"], True )
self.assertEqual( getWaiting["Value"], removal )
self.assertEqual( len( req ), 2 )
self.assertEqual( [ op.Status for op in req ], ["Waiting", "Queued"] )
self.assertEqual( req.subStatusList() , ["Waiting", "Queued"] )
self.assertEqual( removal.Order, 0 )
self.assertEqual( removal.Order, req.Order )
self.assertEqual( transfer.Order, 1 )
self.assertEqual( removal.Status, "Waiting" )
self.assertEqual( transfer.Status, "Queued" )
for subFile in removal:
subFile.Status = "Done"
removal.Status = "Done"
self.assertEqual( removal.Status, "Done" )
self.assertEqual( transfer.Status, "Waiting" )
self.assertEqual( transfer.Order, req.Order )
# # len, looping
self.assertEqual( len( req ), 2 )
self.assertEqual( [ op.Status for op in req ], ["Done", "Waiting"] )
self.assertEqual( req.subStatusList() , ["Done", "Waiting"] )
digest = req.toJSON()
self.assertEqual( digest["OK"], True )
getWaiting = req.getWaiting()
self.assertEqual( getWaiting["OK"], True )
self.assertEqual( getWaiting["Value"], transfer )
def test_05FTS( self ):
""" FTS state machine """
req = Request()
req.RequestName = "FTSTest"
ftsTransfer = Operation()
ftsTransfer.Type = "ReplicateAndRegister"
ftsTransfer.TargetSE = "CERN-USER"
ftsFile = File()
ftsFile.LFN = "/a/b/c"
ftsFile.Checksum = "123456"
ftsFile.ChecksumType = "Adler32"
ftsTransfer.addFile( ftsFile )
req.addOperation( ftsTransfer )
self.assertEqual( req.Status, "Waiting", "1. wrong request status: %s" % req.Status )
self.assertEqual( ftsTransfer.Status, "Waiting", "1. wrong ftsStatus status: %s" % ftsTransfer.Status )
# # scheduled
ftsFile.Status = "Scheduled"
self.assertEqual( ftsTransfer.Status, "Scheduled", "2. wrong status for ftsTransfer: %s" % ftsTransfer.Status )
self.assertEqual( req.Status, "Scheduled", "2. wrong status for request: %s" % req.Status )
# # add new operation before FTS
insertBefore = Operation()
insertBefore.Type = "RegisterReplica"
insertBefore.TargetSE = "CERN-USER"
insertFile = File()
insertFile.LFN = "/a/b/c"
insertFile.PFN = "http://foo/bar"
insertBefore.addFile( insertFile )
req.insertBefore( insertBefore, ftsTransfer )
self.assertEqual( insertBefore.Status, "Waiting", "3. wrong status for insertBefore: %s" % insertBefore.Status )
self.assertEqual( ftsTransfer.Status, "Scheduled", "3. wrong status for ftsStatus: %s" % ftsTransfer.Status )
self.assertEqual( req.Status, "Waiting", "3. wrong status for request: %s" % req.Status )
# # prev done
insertFile.Status = "Done"
self.assertEqual( insertBefore.Status, "Done", "4. wrong status for insertBefore: %s" % insertBefore.Status )
self.assertEqual( ftsTransfer.Status, "Scheduled", "4. wrong status for ftsStatus: %s" % ftsTransfer.Status )
self.assertEqual( req.Status, "Scheduled", "4. wrong status for request: %s" % req.Status )
# # reschedule
ftsFile.Status = "Waiting"
self.assertEqual( insertBefore.Status, "Done", "5. wrong status for insertBefore: %s" % insertBefore.Status )
self.assertEqual( ftsTransfer.Status, "Waiting", "5. wrong status for ftsStatus: %s" % ftsTransfer.Status )
self.assertEqual( req.Status, "Waiting", "5. wrong status for request: %s" % req.Status )
# # fts done
ftsFile.Status = "Done"
self.assertEqual( insertBefore.Status, "Done", "5. wrong status for insertBefore: %s" % insertBefore.Status )
self.assertEqual( ftsTransfer.Status, "Done", "5. wrong status for ftsStatus: %s" % ftsTransfer.Status )
self.assertEqual( req.Status, "Done", "5. wrong status for request: %s" % req.Status )
def test_06StateMachine( self ):
""" state machine tests """
r = Request( {"RequestName": "SMT"} )
self.assertEqual( r.Status, "Waiting", "1. wrong status %s" % r.Status )
r.addOperation( Operation( {"Status": "Queued"} ) )
self.assertEqual( r.Status, "Waiting", "2. wrong status %s" % r.Status )
r.addOperation( Operation( {"Status": "Queued"} ) )
self.assertEqual( r.Status, "Waiting", "3. wrong status %s" % r.Status )
r[0].Status = "Done"
self.assertEqual( r.Status, "Waiting", "4. wrong status %s" % r.Status )
r[1].Status = "Done"
self.assertEqual( r.Status, "Done", "5. wrong status %s" % r.Status )
r[0].Status = "Failed"
self.assertEqual( r.Status, "Failed", "6. wrong status %s" % r.Status )
r[0].Status = "Queued"
self.assertEqual( r.Status, "Waiting", "7. wrong status %s" % r.Status )
r.insertBefore( Operation( {"Status": "Queued"} ), r[0] )
self.assertEqual( r.Status, "Waiting", "8. wrong status %s" % r.Status )
r.insertBefore( Operation( {"Status": "Queued"} ), r[0] )
self.assertEqual( r.Status, "Waiting", "9. wrong status %s" % r.Status )
r.insertBefore( Operation( {"Status": "Scheduled"} ), r[0] )
self.assertEqual( r.Status, "Scheduled", "10. wrong status %s" % r.Status )
r.insertBefore( Operation( {"Status": "Queued" } ), r[0] )
self.assertEqual( r.Status, "Waiting", "11. wrong status %s" % r.Status )
r[0].Status = "Failed"
self.assertEqual( r.Status, "Failed", "12. wrong status %s" % r.Status )
r[0].Status = "Done"
self.assertEqual( r.Status, "Scheduled", "13. wrong status %s" % r.Status )
r[1].Status = "Failed"
self.assertEqual( r.Status, "Failed", "14. wrong status %s" % r.Status )
r[1].Status = "Done"
self.assertEqual( r.Status, "Waiting", "15. wrong status %s" % r.Status )
r[2].Status = "Scheduled"
self.assertEqual( r.Status, "Scheduled", "16. wrong status %s" % r.Status )
r[2].Status = "Queued"
self.assertEqual( r.Status, "Waiting", "17. wrong status %s" % r.Status )
r[2].Status = "Scheduled"
self.assertEqual( r.Status, "Scheduled", "18. wrong status %s" % r.Status )
r = Request()
for i in range( 5 ):
r.addOperation( Operation( {"Status": "Queued" } ) )
r[0].Status = "Done"
self.assertEqual( r.Status, "Waiting", "19. wrong status %s" % r.Status )
r[1].Status = "Done"
self.assertEqual( r.Status, "Waiting", "20. wrong status %s" % r.Status )
r[2].Status = "Scheduled"
self.assertEqual( r.Status, "Scheduled", "21. wrong status %s" % r.Status )
r[2].Status = "Done"
self.assertEqual( r.Status, "Waiting", "22. wrong status %s" % r.Status )
def test_07List( self ):
""" setitem, delitem, getitem and dirty """
r = Request()
ops = [ Operation() for i in range( 5 ) ]
for op in ops:
r.addOperation( op )
for i, op in enumerate( ops ):
self.assertEqual( op, r[i], "__getitem__ failed" )
op = Operation()
r[0] = op
self.assertEqual( op, r[0], "__setitem__ failed" )
del r[0]
self.assertEqual( len( r ), 4, "__delitem__ failed" )
def test_08Optimize( self ):
title = {
0: 'Simple Failover',
1: 'Double Failover',
2: 'Double Failover + ForwardDiset',
3: 'ForwardDiset + Double Failover',
4: 'ForwardDiset + Double Failover + ForwardDiset',
5: 'ForwardDiset + Double Failover (# Failover SE) + ForwardDiset',
6: 'ForwardDiset + Double Failover (# Destination SE) + ForwardDiset'
}
debug = False
if debug != False:
print ''
for reqType in title:
r = createRequest( reqType )
res = optimizeRequest( r, printOutput = title[reqType] if ( debug == reqType and debug is not False ) else False )
self.assertEqual( res['OK'], True )
self.assertEqual( res['Value'], True )
if reqType in ( 0, 1 ):
self.assertEqual( len( r ), 2, 'Wrong number of operations: %d' % len( r ) )
self.assertEqual( r[0].Type, 'ReplicateAndRegister' )
self.assertEqual( r[1].Type, 'RemoveReplica' )
if reqType == 1:
self.assertEqual( len( r[0] ), 2, 'Wrong number of files: %d' % len( r[0] ) )
self.assertEqual( len( r[1] ), 2, 'Wrong number of files: %d' % len( r[1] ) )
elif reqType == 2:
self.assertEqual( len( r ), 3, 'Wrong number of operations: %d' % len( r ) )
self.assertEqual( r[0].Type, 'ReplicateAndRegister' )
self.assertEqual( r[1].Type, 'RemoveReplica' )
self.assertEqual( r[2].Type, 'ForwardDiset' )
self.assertEqual( len( r[0] ), 2, 'Wrong number of files: %d' % len( r[0] ) )
self.assertEqual( len( r[1] ), 2, 'Wrong number of files: %d' % len( r[1] ) )
elif reqType == 3:
self.assertEqual( len( r ), 3, 'Wrong number of operations: %d' % len( r ) )
self.assertEqual( r[1].Type, 'ReplicateAndRegister' )
self.assertEqual( r[2].Type, 'RemoveReplica' )
self.assertEqual( r[0].Type, 'ForwardDiset' )
self.assertEqual( len( r[1] ), 2, 'Wrong number of files: %d' % len( r[1] ) )
self.assertEqual( len( r[2] ), 2, 'Wrong number of files: %d' % len( r[2] ) )
elif reqType == 4:
self.assertEqual( len( r ), 4, 'Wrong number of operations: %d' % len( r ) )
self.assertEqual( r[1].Type, 'ReplicateAndRegister' )
self.assertEqual( r[2].Type, 'RemoveReplica' )
self.assertEqual( r[0].Type, 'ForwardDiset' )
self.assertEqual( r[3].Type, 'ForwardDiset' )
self.assertEqual( len( r[1] ), 2, 'Wrong number of files: %d' % len( r[1] ) )
self.assertEqual( len( r[2] ), 2, 'Wrong number of files: %d' % len( r[2] ) )
elif reqType == 5:
self.assertEqual( len( r ), 5, 'Wrong number of operations: %d' % len( r ) )
self.assertEqual( r[1].Type, 'ReplicateAndRegister' )
self.assertEqual( r[2].Type, 'RemoveReplica' )
self.assertEqual( r[3].Type, 'RemoveReplica' )
self.assertEqual( r[0].Type, 'ForwardDiset' )
self.assertEqual( r[4].Type, 'ForwardDiset' )
self.assertEqual( len( r[1] ), 2, 'Wrong number of files: %d' % len( r[1] ) )
self.assertEqual( len( r[2] ), 1, 'Wrong number of files: %d' % len( r[2] ) )
self.assertEqual( len( r[3] ), 1, 'Wrong number of files: %d' % len( r[3] ) )
elif reqType == 6:
self.assertEqual( len( r ), 5, 'Wrong number of operations: %d' % len( r ) )
self.assertEqual( r[1].Type, 'ReplicateAndRegister' )
self.assertEqual( r[2].Type, 'ReplicateAndRegister' )
self.assertEqual( r[3].Type, 'RemoveReplica' )
self.assertEqual( r[0].Type, 'ForwardDiset' )
self.assertEqual( r[4].Type, 'ForwardDiset' )
self.assertEqual( len( r[1] ), 1, 'Wrong number of files: %d' % len( r[1] ) )
self.assertEqual( len( r[2] ), 1, 'Wrong number of files: %d' % len( r[2] ) )
self.assertEqual( len( r[3] ), 2, 'Wrong number of files: %d' % len( r[3] ) )
# # test execution
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase( RequestTests )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| arrabito/DIRAC | RequestManagementSystem/Client/test/Test_Request.py | Python | gpl-3.0 | 16,952 | ["DIRAC"] | 294191c643d4a5a684c9dc22ca7ef27e3b8a65669cc74decd9ebb3a02d0f6af7 |
#!/usr/bin/env python
#
# Copyright (C) 2013 Ben Woodcroft, available under GPLv3 or later
#
from optparse import OptionParser
import sys
from pprint import pprint
import pysam
class ReadLoader:
"""AUX: Call back for getting aligned reads
Used in conjunction with pysam.fetch
"""
def __init__(self):
self.alignedReads = []
def __call__(self, alignment):
self.alignedReads.append(alignment)
if __name__ == '__main__':
# initialise the options parser
parser = OptionParser("\n\n %prog [options]")
# add options here
#parser.add_option("-f", "--fasta", dest="fasta", help="Fasta file of sequences to be prepped [required]")
parser.add_option("-b", "--bam", dest="bam", help="BAM file to be analysed [required]")
parser.add_option("-f", "--forward", dest="forward_file", help="Output forwards to this file [required]")
parser.add_option("-r", "--reverse", dest="reverse_file", help="Output reverse reads to this file [required]")
(opts, args) = parser.parse_args()
sam = pysam.Samfile(opts.bam, 'rb')
f = open(opts.forward_file,'w')
r = open(opts.reverse_file,'w')
for reference, contig_length in zip(sam.references, sam.lengths):
rl = ReadLoader()
sam.fetch(reference, 0, contig_length, callback = rl)
print "Found",len(rl.alignedReads),"reads to consider"
for read in rl.alignedReads:
# Ignore unpaired reads or secondary hits - reads should only count once
if read.is_secondary or not read.is_proper_pair:
continue
# Only need to work with the read1's, not their partners
if read.is_read2:
continue
# Not sure how, but this appears to happen somehow. TODO: advise the user how many times
if read.tlen < 0:
continue
# OK, so we have a read1. We should now be able to write out
#TODO: check the +/- 1 are right in the reads below
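# Each kept read1 produces a line "1 <position> <template length>"; reverse-strand reads go to the reverse file, the rest to the forward file.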
output = "1 "
if read.is_reverse:
output += str(read.aend-1)
else:
output += str(read.pos+1)
output += " "+str(read.tlen)+"\n"
if read.is_reverse:
r.write(output)
else:
f.write(output)
f.close()
r.close()
#TODO: Advise how many reads were printed out
| wwood/bbbin | gccorrect_preparation.py | Python | gpl-3.0 | 2,410 | ["pysam"] | 941910fe34e4a251a32f81a14b65f24dedad2efc7e2dd222d8e3eb7bc26a10f2 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnotebase import HasNoteBase
#-------------------------------------------------------------------------
# "Events having notes"
#-------------------------------------------------------------------------
class HasNote(HasNoteBase):
"""Events having notes"""
name = _('Events having <count> notes')
description = _("Matches events having a certain number of notes")
| arunkgupta/gramps | gramps/gen/filters/rules/event/_hasnote.py | Python | gpl-2.0 | 1,717 | ["Brian"] | 7bb7cd43ae1f7f545a610aac6c7cc997ec6cf916463d5398f40a20a2cf78860e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Factory functions producing ABINIT Works.
Works are packed together in a flow. A flow can be run using abirun (abipy)
Entry points for client code (high-level interface)
"""
from __future__ import unicode_literals, division, print_function
import os
from .abiobjects import KSampling, Screening, SelfEnergy, ExcHamiltonian, HilbertTransform
from .strategies import ScfStrategy, NscfStrategy, ScreeningStrategy, SelfEnergyStrategy, MdfBse_Strategy
from .works import BandStructureWork, G0W0Work, BseMdfWork
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
def bandstructure_work(structure, pseudos, scf_kppa, nscf_nband,
ndivsm, accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
dos_kppa=None, workdir=None, manager=None, work_class=None, **extra_abivars):
"""
Returns a :class:`Work` for bandstructure calculations.
Args:
structure: Pymatgen structure.
pseudos: List of `Pseudo` objects.
scf_kppa: Defines the sampling used for the SCF run.
nscf_nband: Number of bands included in the NSCF run.
ndivsm: Number of divisions used to sample the smallest segment of the k-path.
accuracy: Accuracy of the calculation.
spin_mode: Spin polarization.
smearing: Smearing technique.
charge: Electronic charge added to the unit cell.
scf_algorithm: Algorithm used for solving the SCF cycle.
dos_kppa: Defines the k-point sampling used for the computation of the DOS
(None if DOS is not wanted).
workdir: Working directory.
manager: :class:`TaskManager` instance.
extra_abivars: Dictionary with extra variables passed to ABINIT.
"""
# SCF calculation.
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge,
scf_algorithm=scf_algorithm, **extra_abivars)
# Band structure calculation.
nscf_ksampling = KSampling.path_from_structure(ndivsm, structure)
nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
# DOS calculation.
dos_strategy = None
if dos_kppa is not None:
dos_ksampling = KSampling.automatic_density(structure, dos_kppa, chksymbreak=0)
#dos_ksampling = KSampling.monkhorst(dos_ngkpt, shiftk=dos_shiftk, chksymbreak=0)
dos_strategy = NscfStrategy(scf_strategy, dos_ksampling, nscf_nband, nscf_solver=None, **extra_abivars)
if work_class is None: work_class = BandStructureWork
return work_class(scf_strategy, nscf_strategy, dos_inputs=dos_strategy,
workdir=workdir, manager=manager)
#def relaxation_work(workdir, manager, structure, pseudos, scf_kppa,
# accuracy="normal", spin_mode="polarized",
# smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, **extra_abivars):
# """
# Returns a Work object that performs structural relaxations.
#
# Args:
# workdir:
# Working directory.
# manager:
# `TaskManager` object.
# structure:
# Pymatgen structure.
# pseudos:
# List of `Pseudo` objects.
# scf_kppa:
# Defines the sampling used for the SCF run.
# accuracy:
# Accuracy of the calculation.
# spin_mode:
# Spin polarization.
# smearing:
# Smearing technique.
# charge:
# Electronic charge added to the unit cell.
# scf_algorithm:
# Algorithm used for solving the SCF cycle.
# """
# # SCF calculation.
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
# relax_algo =
#
# relax_strategy = RelaxStrategy(structure, pseudos, scf_ksampling, relax_algo,
# accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
# charge=charge, scf_algorithm=scf_algorithm)
#
# #return Relaxation(relax_strategy, workdir=workdir, manager=manager)
def g0w0_with_ppmodel_work(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx,
accuracy="normal", spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
ppmodel="godby", charge=0.0, scf_algorithm=None, inclvkb=2, scr_nband=None,
sigma_nband=None, gw_qprange=1, workdir=None, manager=None, work_class=None, **extra_abivars):
"""
Returns a :class:`Work` object that performs G0W0 calculations for the given the material.
Args:
structure: Pymatgen structure.
pseudos: List of `Pseudo` objects.
scf_kppa: Defines the sampling used for the SCF run.
nscf_nband: Number of bands included in the NSCF run.
ecuteps: Cutoff energy [Ha] for the screening matrix.
ecutsigx: Cutoff energy [Ha] for the exchange part of the self-energy.
accuracy: Accuracy of the calculation.
spin_mode: Spin polarization.
smearing: Smearing technique.
ppmodel: Plasmonpole technique.
charge: Electronic charge added to the unit cell.
scf_algorithm: Algorithm used for solving the SCF cycle.
inclvkb: Treatment of the dipole matrix elements (see abinit variable).
scr_nband: Number of bands used to compute the screening (default is nscf_nband)
sigma_nband: Number of bands used to compute the self-energy (default is nscf_nband)
gw_qprange: Option for the automatic selection of k-points and bands for GW corrections.
See Abinit docs for more detail. The default value makes the code compute the
QP energies for all the k-points in the IBZ and one band above and one band below the Fermi level.
workdir: Working directory.
manager: :class:`TaskManager` instance.
extra_abivars: Dictionary with extra variables passed to ABINIT.
"""
# TODO: Cannot use istwfk != 1.
if "istwfk" not in extra_abivars:
extra_abivars["istwfk"] = "*1"
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge,
scf_algorithm=scf_algorithm, **extra_abivars)
nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
if scr_nband is None: scr_nband = nscf_nband
if sigma_nband is None: sigma_nband = nscf_nband
screening = Screening(ecuteps, scr_nband, w_type="RPA", sc_mode="one_shot",
hilbert=None, ecutwfn=None, inclvkb=inclvkb)
self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening,
gw_qprange=gw_qprange, ppmodel=ppmodel)
scr_strategy = ScreeningStrategy(scf_strategy, nscf_strategy, screening, **extra_abivars)
sigma_strategy = SelfEnergyStrategy(scf_strategy, nscf_strategy, scr_strategy, self_energy,
**extra_abivars)
if work_class is None: work_class = G0W0Work
return work_class(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy, workdir=workdir, manager=manager)
def g0w0_extended_work(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx, accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", response_models=["godby"], charge=0.0, inclvkb=2,
scr_nband=None, sigma_nband=None, workdir=None, manager=None, gamma=True, nksmall=20, work_class=None,
**extra_abivars):
"""
Returns a :class:`Work` object that performs G0W0 calculations for the given the material.
Args:
structure: Pymatgen structure.
pseudos: List of `Pseudo` objects.
scf_kppa: Defines the sampling used for the SCF run.
nscf_nband: Number of bands included in the NSCF run.
ecuteps: Cutoff energy [Ha] for the screening matrix.
ecutsigx: Cutoff energy [Ha] for the exchange part of the self-energy.
accuracy: Accuracy of the calculation.
spin_mode: Spin polarization.
smearing: Smearing technique.
response_models: List of response-function models: plasmon-pole model names or "cd" for contour deformation.
charge: Electronic charge added to the unit cell.
scf_algorithm: Algorithm used for solving the SCF cycle.
inclvkb: Treatment of the dipole matrix elements (see abinit variable).
scr_nband: Number of bands used to compute the screening (default is nscf_nband)
sigma_nband: Number of bands used to compute the self-energy (default is nscf_nband)
workdir: Working directory.
manager: :class:`TaskManager` instance.
nksmall: if not None, a DFT band structure calculation will be added after the SCF run.
extra_abivars: Dictionary with extra variables passed to ABINIT.
"""
# TODO: Cannot use istwfk != 1.
if gamma:
if scf_kppa == 1:
scf_ksampling = KSampling.gamma_centered(kpts=(1, 1, 1))
nscf_ksampling = KSampling.gamma_centered(kpts=(1, 1, 1))
elif scf_kppa == 2:
scf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
nscf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
elif scf_kppa <= 10:
scf_ksampling = KSampling.gamma_centered(kpts=(scf_kppa, scf_kppa, scf_kppa))
nscf_ksampling = KSampling.gamma_centered(kpts=(scf_kppa, scf_kppa, scf_kppa))
else:
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0, shifts=(0, 0, 0))
nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0, shifts=(0, 0, 0))
else:
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
if "istwfk" not in extra_abivars:
extra_abivars["istwfk"] = "*1"
scf_strategy = []
to_add = {}
scf_nband = min(nscf_nband)
#print(scf_nband)
extra_abivars.update(to_add)
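# Assumed convention: extra_abivars keys ending in '_s' hold a list of values to scan; one ScfStrategy is created per value (a convergence series), keeping pawecutdg at 2*ecut for each.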
for k in extra_abivars.keys():
if k[-2:] == '_s':
var = k[:len(k)-2]
values = extra_abivars.pop(k)
to_add.update({k: values[-1]})
for value in values:
extra_abivars[var] = value
extra_abivars['pawecutdg'] = extra_abivars['ecut']*2
scf_strategy.append(ScfStrategy(structure, pseudos, scf_ksampling, accuracy=accuracy,
spin_mode=spin_mode, smearing=smearing, charge=charge,
scf_algorithm=None, **extra_abivars))
#temporary for testing a new approach ...
spread_scr = False if os.path.isfile('no_spread_scr') else True
if len(scf_strategy) == 0:
scf_strategy.append(ScfStrategy(structure, pseudos, scf_ksampling, accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge, scf_algorithm=None, **extra_abivars))
scf_strategy[-1].electrons.nband = scf_nband
nscf_strategy = NscfStrategy(scf_strategy[-1], nscf_ksampling, max(nscf_nband), **extra_abivars)
if scr_nband is None:
scr_nband = nscf_nband
if sigma_nband is None:
sigma_nband = nscf_nband
if ecutsigx < max(ecuteps):
ecutsigx = max(ecuteps)
sigma_strategy = []
if 'cd' in response_models:
hilbert = HilbertTransform(nomegasf=100, domegasf=None, spmeth=1, nfreqre=None, freqremax=None, nfreqim=None,
freqremin=None)
for response_model in response_models:
for ecuteps_v in ecuteps:
for nscf_nband_v in nscf_nband:
scr_nband = nscf_nband_v
sigma_nband = nscf_nband_v
if response_model == 'cd':
screening = Screening(ecuteps_v, scr_nband, w_type="RPA", sc_mode="one_shot", hilbert=hilbert,
ecutwfn=None, inclvkb=inclvkb)
self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening, hilbert=hilbert)
else:
ppmodel = response_model
screening = Screening(ecuteps_v, scr_nband, w_type="RPA", sc_mode="one_shot", ecutwfn=None,
inclvkb=inclvkb)
self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening, ppmodel=ppmodel,
gw_qprange=1)
scr_strategy = ScreeningStrategy(scf_strategy[-1], nscf_strategy, screening, **extra_abivars)
sigma_strategy.append(SelfEnergyStrategy(scf_strategy[-1], nscf_strategy, scr_strategy, self_energy,
**extra_abivars))
if work_class is None: work_class = G0W0Work
return work_class(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy, workdir=workdir, manager=manager,
spread_scr=spread_scr, nksmall=nksmall)
#def g0w0_with_cd_work(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx, hilbert,
# accuracy="normal", spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
# charge=0.0, scf_algorithm=None, inclvkb=2, scr_nband=None,
# sigma_nband=None, workdir=None, manager=None, **extra_abivars):
# """
# Returns a Work object that performs G0W0 calculations for the given the material.
#
# Args:
# structure:
# Pymatgen structure.
# pseudos:
# List of `Pseudo` objects.
# scf_kppa:
# Defines the sampling used for the SCF run.
# nscf_nband:
# Number of bands included in the NSCF run.
# ecuteps:
# Cutoff energy [Ha] for the screening matrix.
# ecutsigx:
# Cutoff energy [Ha] for the exchange part of the self-energy.
# hilbert:
# `HilbertTransform` object with the parameters defining the frequency mesh
# used for the spectral function and the frequency mesh used for the polarizability
# accuracy:
# Accuracy of the calculation.
# spin_mode:
# Spin polarization.
# smearing:
# Smearing technique.
# charge:
# Electronic charge added to the unit cell.
# scf_algorithm:
# Algorithm used for solving of the SCF cycle.
# inclvkb:
# Treatment of the dipole matrix elements (see abinit variable).
# scr_nband:
# Number of bands used to compute the screening (default is nscf_nband)
# sigma_nband:
# Number of bands used to compute the self-energy (default is nscf_nband)
# workdir:
# Working directory.
# manager:
# `TaskManager` instance.
# extra_abivars
# Dictionary with extra variables passed to ABINIT.
# """
# # TODO: Cannot use istwfk != 1.
# if "istwfk" not in extra_abivars:
# extra_abivars["istwfk"] = "*1"
#
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
#
# scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
# accuracy=accuracy, spin_mode=spin_mode,
# smearing=smearing, charge=charge,
# scf_algorithm=None, **extra_abivars)
#
# nscf_ksampling = KSampling.automatic_density(structure, 1, chksymbreak=0)
#
# nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
#
# if scr_nband is None: scr_nband = nscf_nband
# if sigma_nband is None: sigma_nband = nscf_nband
#
# screening = Screening(ecuteps, scr_nband, w_type="RPA", sc_mode="one_shot",
# hilbert=hilbert, ecutwfn=None, inclvkb=inclvkb)
#
# self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening,
# hilbert=hilbert)
#
# scr_strategy = ScreeningStrategy(scf_strategy, nscf_strategy, screening,
# **extra_abivars)
#
# sigma_strategy = SelfEnergyStrategy(scf_strategy, nscf_strategy, scr_strategy, self_energy,
# **extra_abivars)
#
# return G0W0Work(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy,
# workdir=workdir, manager=manager)
def bse_with_mdf_work(structure, pseudos, scf_kppa, nscf_nband, nscf_ngkpt, nscf_shiftk,
ecuteps, bs_loband, bs_nband, soenergy, mdf_epsinf,
exc_type="TDA", bs_algo="haydock", accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, workdir=None, manager=None,
work_class=None, **extra_abivars):
"""
Returns a :class:`Work` object that performs a GS + NSCF + Bethe-Salpeter calculation.
The self-energy corrections are approximated with the scissors operator.
The screening is modeled by the model dielectric function.
Args:
structure: :class:`Structure` object.
pseudos: List of `Pseudo` objects.
scf_kppa: Defines the sampling used for the SCF run.
nscf_nband: Number of bands included in the NSCF run.
nscf_ngkpt: Divisions of the k-mesh used for the NSCF and the BSE run.
nscf_shiftk: Shifts used for the NSCF and the BSE run.
ecuteps: Cutoff energy [Ha] for the screening matrix.
bs_loband: Index of the first occupied band included the e-h basis set
(ABINIT convention i.e. first band starts at 1).
Can be scalar or array of shape (nsppol,)
bs_nband: Highest band index used for the construction of the e-h basis set.
soenergy: Scissor energy in Hartree.
mdf_epsinf: Value of the macroscopic dielectric function used in expression for the model dielectric function.
exc_type: Approximation used for the BSE Hamiltonian (Tamm-Dancoff or coupling).
bs_algo: Algorithm for the computation of the macroscopic dielectric function.
accuracy: Accuracy of the calculation.
spin_mode: Spin polarization.
smearing: Smearing technique.
charge: Electronic charge added to the unit cell.
scf_algorithm: Algorithm used for solving the SCF cycle.
workdir: Working directory.
manager: :class:`TaskManger` instance.
extra_abivars: Dictionary with extra variables passed to ABINIT.
"""
# TODO: Cannot use istwfk != 1.
if "istwfk" not in extra_abivars:
extra_abivars["istwfk"] = "*1"
# Ground-state strategy.
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge, scf_algorithm=None, **extra_abivars)
# NSCF calculation with the randomly-shifted k-mesh.
nscf_ksampling = KSampling.monkhorst(nscf_ngkpt, shiftk=nscf_shiftk, chksymbreak=0)
nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
# Strategy for the BSE calculation.
exc_ham = ExcHamiltonian(bs_loband, bs_nband, soenergy, coulomb_mode="model_df", ecuteps=ecuteps,
spin_mode=spin_mode, mdf_epsinf=mdf_epsinf, exc_type=exc_type, algo=bs_algo,
bs_freq_mesh=None, with_lf=True, zcut=None)
bse_strategy = MdfBse_Strategy(scf_strategy, nscf_strategy, exc_ham, **extra_abivars)
if work_class is None: work_class = BseMdfWork
return work_class(scf_strategy, nscf_strategy, bse_strategy, workdir=workdir, manager=manager)
| sonium0/pymatgen | pymatgen/io/abinitio/calculations.py | Python | mit | 20,659 | ["ABINIT", "pymatgen"] | 3f7ff4158ac66993604818444f44e258f224f3670ec7d7eec0bf4dd03c83cb66 |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(cwd,'..','documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.geraudbourdin.svgview.js')
if not os.path.exists(js_file):
js_file = os.path.join(cwd,'..','assets','com.geraudbourdin.svgview.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComGeraudbourdinSvgviewModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
binaryname = 'lib%s.a' % manifest['moduleid']
binarypath = os.path.join('build', binaryname)
manifestarch = set(manifest['architectures'].split(' '))
output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
builtarch = set(output.split(':')[-1].strip().split(' '))
if ('arm64' not in builtarch):
warn('built module is missing 64-bit support.')
if (manifestarch != builtarch):
warn('there is discrepancy between the architectures specified in module manifest and compiled binary.')
warn('architectures in manifest: %s' % ', '.join(manifestarch))
warn('compiled binary architectures: %s' % ', '.join(builtarch))
die('please update manifest to match module binary architectures.')
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
p = os.path.join(cwd, 'assets')
if not os.path.exists(p):
p = os.path.join(cwd, '..', 'assets')
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
for dn in ('example','platform'):
p = os.path.join(cwd, dn)
if not os.path.exists(p):
p = os.path.join(cwd, '..', dn)
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
verify_build_arch(manifest, config)
package_module(manifest,mf,config)
sys.exit(0)
| titanium-forks/GeraudBourdin.Ti.AndroidSvgView | iphone/build.py | Python | mit | 8,522 | ["VisIt"] | 2a0ce8c9ead6b8faf62abc197b1f972033c8a8d2b2011a4dc051fae1e6535c34 |
""" This is a module to handle generic ASE (gui) defaults from a ~/.ase/gui.py configuration file, if it exists.
It is imported when opening ag and can then be modified at runtime, if necessary.
syntax for each entry:
gui_default_settings['key'] = value
"""
gui_default_settings = {
'gui_graphs_string' : 'i, e - E[-1]', # default for the graph command in the gui
'gui_foreground_color': '#000000',
'gui_background_color': '#ffffff',
'covalent_radii' : None,
'radii_scale': 0.89,
}
def read_defaults():
import os.path
name = os.path.expanduser('~/.ase/gui.py')
config = gui_default_settings
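# If present, ~/.ase/gui.py is executed in this namespace (Python 2 execfile); entries of the form gui_default_settings['key'] = value override the defaults above.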
if os.path.exists(name):
execfile(name)
return config
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/gui/defaults.py | Python | gpl-2.0 | 702 | ["ASE"] | a3a7e9adad0d3b6551fbf7c582ef028ac5dfeae6e26ef38fc1c061fe9f59dba8 |
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from pressroom.models import Article
from django.contrib.auth.models import User
from django import forms
from django.forms import ModelForm
# Create your models here.
class Question(models.Model):
article = models.ForeignKey(Article)
block = models.IntegerField(blank=True,default=-1)
user = models.ForeignKey(User)
text = models.TextField("Ask a question.")
notify = models.BooleanField()
offensive = models.BooleanField()
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
if self.offensive:
return "Question #"+str(self.id)+" on '"+self.article.headline+"' (offensive)"
else:
return "Question #"+str(self.id)+" on '"+self.article.headline+"'"
def get_absolute_url(self):
return "%s#question-%s-%s" % (self.article.get_absolute_url(),self.block,self.id)
class QuestionForm(ModelForm):
text = forms.CharField(widget=forms.Textarea(attrs={'rows':5}))
class Meta:
model = Question
exclude = ('article', 'block', 'user', 'offensive')
class Answer(models.Model):
question = models.ForeignKey(Question)
user = models.ForeignKey(User)
text = models.TextField("Answer the question.")
reference = models.URLField(blank=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "Answer #"+str(self.id)+" to question #"+str(self.question.id)+" on '"+self.question.article.headline+"'"
def get_absolute_url(self):
return "%s#answer-%s-%s" % (self.question.article.get_absolute_url(),self.question.block,self.id)
class AnswerForm(ModelForm):
text = forms.CharField(widget=forms.Textarea(attrs={'rows':3}))
class Meta:
model = Answer
exclude = ('question', 'user', 'offensive')
| brianboyer/newsmixer | pie/questions/models.py | Python | gpl-3.0 | 2,701 | ["Brian"] | 708413a643740dcb5b840f641c70fe0fd8ef467f89ce8ca6577b37b6b1871515 |
# -*- coding: UTF-8 -*-
"""This module contains functions related to the VPT2 approximation."""
import sys
import os
import numpy as np
from chemphysconst import Constants
from . import Geometry
from . import Harmonic
# from numpy import linalg
# from . import printfunctions as PF
# Module globals
FLOAT = np.float128
COMPLEX = np.complex256
CONST = Constants()
CORIOLIS_RESONANCE_THRESH = 10.0 # 1/cm
class VPT2_ForceFields(object):
"""This constructs higher-order force fields and property derivatives."""
def __init__(self, path, harmonic, **kwargs):
super(VPT2_ForceFields, self).__init__()
self.path = path
self.harmonic = harmonic
self.geometry = harmonic.geometry
self.kwargs = kwargs
if 'anharm_displacement' not in kwargs:
self.anharm_displacement = 0.05
else:
self.anharm_displacement = kwargs['anharm_displacement']
self.has_cubic = False
self.has_semiquartic = False
def transform_to_normal_coordinate_fc(self, hessian):
"""
Transform a Cartesian Hessian to normal mode force constants.
Coming from a normal mode displacement we assume that the atom ordering
stays the same!
The Hessian should have the units Eh/(bohr^2).
"""
# Get constants
# h_bar = CONST.planck_constant() / (2 * np.pi)
hartee_to_joule = CONST.hartree_energy("J")
bohr_to_meter = CONST.bohr_radius()
u_to_kg = CONST.atomic_mass_constant() # 1.66053904e-27 kg
c = CONST.speed_of_light() # m/s
threeN = len(hessian)
# Mass-weight the Hessian
mw_Hessian = self.harmonic.mass_weight_Hessian(hessian)
mat_L = self.harmonic.mat_L
diag_Hess = self.harmonic.diag_Hess
harm_freq = self.harmonic.harmonic_frequencies(diag_Hess, "1/s")
conversion_factor = hartee_to_joule / (4 * np.pi**2 * c * u_to_kg *
bohr_to_meter**2 * 1e2)
# Transformation
nc_hessian = np.zeros((threeN, threeN), COMPLEX)
for i in range(threeN):
for j in range(threeN):
for m in range(threeN):
for n in range(threeN):
nc_hessian[i, j] += (mat_L[m, i] * mat_L[n, j] *
mw_Hessian[m, n])
nc_hessian[i, j] *= 1 / np.sqrt(harm_freq[i] * harm_freq[j])
# nc_hessian has the unit of J * s^2 * Eh / (amu * bohr^2)
return nc_hessian * conversion_factor
def transform_displaced_Hessians(self, hessians):
"""Return a list of Hessians transformed into the norm-coord-domain."""
nc_Hessians = []
for hessian in hessians:
nc_Hessians.append(self.transform_to_normal_coordinate_fc(hessian))
return nc_Hessians
def check_cubic(self, phi):
"""
Check if the cubic force constants fulfil the Schwarz equation.
Phi_ijk = Phi_jik = Phi_jki = Phi_ikj = Phi_kij = Phi_kji
>> Doesn't do any checks at the moment.
>> Need to implement printing control and proper error handling.
"""
nVib = self.geometry.nVib()
# precision = 9
# i_str = "{:> 3d} "
# const_str = " {{:> {},.{}f}}".format(7 + precision, precision)
# print_str = i_str * 3 + const_str * 6 # + '\n'
for i in range(nVib):
for j in range(i, nVib):
for k in range(j, nVib):
# vec = [phi[i, j, k],
# phi[j, i, k],
# phi[j, k, i],
# phi[i, k, j],
# phi[k, i, j],
# phi[k, j, i]]
# if np.abs(phi[i, j, k]) > 1e-4:
# print print_str.format(i + 7, j + 7, k + 7, *vec)
phi[j, i, k] = phi[i, j, k]
phi[j, k, i] = phi[i, j, k]
phi[i, k, j] = phi[i, j, k]
phi[k, i, j] = phi[i, j, k]
phi[k, j, i] = phi[i, j, k]
return phi
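        # Possible consistency check (sketch, not enabled in the original code):
        #   assert np.allclose(phi, np.transpose(phi, (1, 0, 2)), atol=1e-4)
        #   assert np.allclose(phi, np.transpose(phi, (2, 1, 0)), atol=1e-4)
        # would verify Phi_ijk == Phi_jik and Phi_ijk == Phi_kji after the
        # symmetrisation above.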
def calculate_cubic_force_field(self, nc_Hessians):
"""
Generate a cubic force-field from a set of displacement Hessians.
nc_Hessians holds all 2*3*N Hessians
Phi_ijk = (Phi_ij^+ - Phi_ij^-)/(2 * Delta q)
"""
def check(cub1, cub2):
if np.abs(cub1 - cub2) > 1e-4:
return np.abs(cub1 - cub2)
else:
return False
nTransRot = self.geometry.nTransRot()
nVib = self.geometry.nVib()
disp = 2 * self.anharm_displacement
cubic = np.zeros((nVib, nVib, nVib), FLOAT)
for i in range(nVib):
for j in range(nVib):
for k in range(nVib):
k *= 2
pos = nc_Hessians[k][i + nTransRot, j + nTransRot].real
neg = nc_Hessians[k + 1][i + nTransRot, j + nTransRot].real
cubic[i, j, int(k / 2)] = (pos - neg) / disp
cubic = self.check_cubic(cubic)
self.cubic = cubic
self.has_cubic = True
return 1
def calculate_semiquartic_force_field(self, nc_Hessians):
"""
Generate a semi-quartic force field from a set of displaced Hessians.
        Phi_ijkk = (Phi_ij^+ + Phi_ij^- - 2*Phi_ij^0) / (Delta q_k)^2
"""
nTransRot = self.geometry.nTransRot()
threeN = 3 * len(self.geometry.atoms)
nVib = threeN - nTransRot
hessian0 = self.harmonic.hessian
nc_Hessian_zero = self.transform_to_normal_coordinate_fc(hessian0)
semiquartic = np.zeros((nVib, nVib, nVib), FLOAT)
for i in range(nVib):
for j in range(nVib):
for k in range(nVib):
pos = nc_Hessians[2 * k][i + nTransRot, j + nTransRot]
neg = nc_Hessians[2 * k + 1][i + nTransRot, j + nTransRot]
hess_zero = nc_Hessian_zero[i + nTransRot, j + nTransRot]
semiquartic[i, j, k] = ((pos.real + neg.real -
2 * hess_zero.real) /
(self.anharm_displacement**2))
self.semiquartic = semiquartic
self.has_semiquartic = True
return 1
class VPT2_file(object):
"""
This class reads in a .vpt2 file resulting from an ORCA VPT2 calculation.
"""
def __init__(self, vpt2_file_path):
super(VPT2_file, self).__init__()
self.file_path = self.check_vpt2_file(vpt2_file_path)
self.has_geometry = False
self.has_harmonic = False
self.has_cubic = False
self.has_semiquartic = False
self.geometry = self.get_geometry()
self.harmonic = self.get_harmonic()
self.cubic = self.get_cubic()
self.semiquartic = self.get_semiquartic()
def check_vpt2_file(self, file_path):
"""Return the VPT2 file as a string."""
if not os.path.exists(file_path):
sys.exit("VPT2_file.read_vpt2_file(): "
"Could not find vpt2 file.")
return file_path
def get_geometry(self):
"""Return the geometry object."""
with open(self.file_path) as file_object:
line = file_object.readline()
while line:
if "Atomic coordinates in Angstroem" in line:
line = file_object.readline()
raw_geom = []
for i in range(int(line.strip())):
split = file_object.readline().split()
raw_geom.append([i, split[0], split[2],
split[3], split[4], split[5]])
self.has_geometry = True
break
line = file_object.readline()
if not self.has_geometry:
sys.exit("VPT2_file.get_geometry(): "
"Could not find a valid geometry.")
return Geometry(raw_geom, use_own_masses=True, distance_units="Angs")
def get_harmonic(self):
"""Return the harmonic object."""
with open(self.file_path) as file_object:
line = file_object.readline()
while line:
if "Hessian[i][j] in Eh/(bohr**2)" in line:
line = file_object.readline()
size = tuple(int(i) for i in line.strip().split())
hessian = np.zeros(size, FLOAT)
for i in range(np.prod(size)):
split = file_object.readline().strip().split()
hessian[int(split[0]), int(split[1])] = FLOAT(split[2])
self.has_harmonic = True
break
line = file_object.readline()
if not self.has_harmonic:
sys.exit("VPT2_file.get_geometry(): "
"Could not find a valid Hessian.")
return Harmonic(self.geometry, hessian=hessian)
def get_cubic(self):
"""Return the cubic force field as a nxnxn numpy matrix."""
with open(self.file_path) as file_object:
line = file_object.readline()
while line:
if "Cubic[i][j][k] force field in 1/cm" in line:
line = file_object.readline()
size = tuple(int(i) for i in line.strip().split())
cubic = np.zeros(size, FLOAT)
for i in range(np.prod(size)):
split = file_object.readline().strip().split()
cubic[int(split[0]),
int(split[1]),
int(split[2])] = FLOAT(split[3])
self.has_cubic = True
break
line = file_object.readline()
if not self.has_cubic:
sys.exit("VPT2_file.get_geometry(): "
"Could not find a valid cubic force field.")
return cubic
def get_semiquartic(self):
"""Return the semiquartic force field as a nxnxn numpy matrix."""
with open(self.file_path) as file_object:
line = file_object.readline()
while line:
if "Semi-quartic[i][j][k][k] force field in 1/cm" in line:
line = file_object.readline()
size = tuple(int(i) for i in line.strip().split())
semiquartic = np.zeros(size, FLOAT)
for i in range(np.prod(size)):
split = file_object.readline().strip().split()
semiquartic[int(split[0]),
int(split[1]),
int(split[2])] = FLOAT(split[3])
self.has_semiquartic = True
break
line = file_object.readline()
if not self.has_semiquartic:
sys.exit("VPT2_file.get_geometry(): "
"Could not find a valid semiquartic force field.")
return semiquartic
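# Sketch of the block layout the readers above expect (inferred from the
# parsing code, not an official ORCA format specification):
#
#   Cubic[i][j][k] force field in 1/cm
#   <n> <n> <n>
#   0 0 0 <value>
#   0 0 1 <value>
#   ...
#
# i.e. a header line, one line with the tensor dimensions and then
# np.prod(dimensions) lines of "indices value".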
class VPT2(object):
"""
This handles all calculations related to a cubic/semi-quartic force field.
This includes anharmonic constants, fundamentals, overtones as well as
combination bands, VibRot constants and anharmonic properties.
"""
def __init__(self, harmonic, cubic, semiquartic, **kwargs):
super(VPT2, self).__init__()
self.harmonic = harmonic
self.geometry = harmonic.geometry
self.kwargs = kwargs
if type(cubic) == np.ndarray:
self.cubic = cubic
else:
sys.exit("VibRot.VPT2.__init__(): "
"A valid cubic force field is necessary")
if type(semiquartic) == np.ndarray:
self.semiquartic = semiquartic
else:
sys.exit("VibRot.VPT2.__init__(): "
"A valid semi-quartic force field is necessary")
if "print_level" in kwargs:
self.print_level = kwargs["print_level"]
else:
self.print_level = 0
# Common Variables
self.nTransRot = harmonic.geometry.nTransRot()
self.nVib = harmonic.geometry.nVib()
self.harm_freq = harmonic.freq_inv_cm[self.nTransRot:].real
self.mat_D = self.harmonic_VPT2_derivative()
def anharmonic_constants(self):
"""
Return the anharmonic constants chi as an (3N-nTransRot)**2 tensor.
Calculated according to
Papousek/Alijev, 1982, isbn: 9780444997371, 160 pp. and
Amos/Handy/Jayatilaka (doi:10.1063/1.461259)
"""
cubic = self.cubic
semiquartic = self.semiquartic
fermi_resonances_overview = self.detect_Fermi_resonances(self.mat_D)
fermi_resonances = [set(f[0]) for f in fermi_resonances_overview]
def omega(w_k, w_l, w_m):
# Eq. 6c Amos/Handy/Jayatilaka (doi:10.1063/1.461259)
omega_klm = (w_m * (w_k**2 + w_l**2 - w_m**2) / (2 *
((w_k + w_l + w_m) *
(-w_k + w_l + w_m) *
(w_k - w_l + w_m) *
(w_k + w_l - w_m))))
return omega_klm
def omega_fermi(w_k, w_l, w_m):
# Eq. 6d Amos/Handy/Jayatilaka (doi:10.1063/1.461259)
omega_klm = 0.125 * (1 / (w_k + w_l + w_m) +
1 / (-w_k + w_l + w_m) +
1 / (w_k - w_l + w_m))
return omega_klm
def check_fermi(i, j, l):
if set([i, j, l]) in fermi_resonances:
return True
# Collect the necessary variables
nVib = self.nVib
nTransRot = self.nTransRot
w = self.harm_freq
cz = self.harmonic.coriolis_zeta()
b_e = self.harmonic.rot_const_inv_cm
chi = np.zeros((nVib, nVib), FLOAT)
for k in range(nVib):
for l in range(k, nVib):
if k == l:
# Term2 in Eq. 17.1.2 of Papousek/Alijev
chi_t2 = 0.0
for m in range(nVib):
if check_fermi(k, l, m):
if self.print_level:
print("Fermi resonance: w_%s~2w_%s" % (k, m))
# Eq. 6b Amos/Handy/Jayatilaka
chi_t2 += (0.125 * cubic[k, k, m]**2 *
(1 / w[m] + 0.25 / (2 * w[k] + w[m])))
else:
# Eq. 6a Amos/Handy/Jayatilaka
chi_t2 += (cubic[k, k, m]**2 *
(8 * w[k]**2 - 3 * w[m]**2) /
(16 * w[m] * (4 * w[k]**2 - w[m]**2)))
# Eq. 17.1.2 of Papousek/Alijev
chi[k, k] = semiquartic[k, k, k] / 16 - chi_t2
else:
# Term 1 in Eq. 17.1.3 of Papousek/Alijev
chi_t1 = 0.25 * semiquartic[k, k, l]
chi_t2 = 0.0
chi_t3 = 0.0
chi_t4 = 0.0
for m in range(nVib):
# Term 2 in Eq. 17.1.3 of Papousek/Alijev
chi_t2 -= cubic[k, k, m] * cubic[l, l, m] / w[m]
if check_fermi(k, l, m):
if self.print_level:
print("Fermi resonance: "
"w_%s~w_%s+w_%s" % (k, l, m))
chi_t3 -= (cubic[k, l, m]**2 *
omega_fermi(w[k], w[l], w[m]))
else:
# Term 3 in Eq. 17.1.3 of Papousek/Alijev
chi_t3 -= (cubic[k, l, m]**2 *
omega(w[k], w[l], w[m]))
lz, kz = l + nTransRot, k + nTransRot
# Term 4 in Eq. 17.1.3 of Papousek/Alijev
for axis in range(3):
chi_t4 += (cz[axis, kz, lz]**2 *
(w[k] / w[l] + w[l] / w[k]) *
b_e[axis])
chi[k, l] = chi_t1 + chi_t2 / 4 + chi_t3 + chi_t4
chi[l, k] = chi[k, l]
return chi
def fundamental_transitions(self, chi):
"""Return the fundamental transitions in 1/cm."""
nVib = self.nVib
w = self.harm_freq
fundamental_frequencies = np.zeros((nVib,), dtype=FLOAT)
# Port this to an Einstein-sum version soon!
for r in range(nVib):
tmp = FLOAT(0)
for s in range(nVib):
if r != s:
tmp += chi[r, s]
fundamental_frequencies[r] = w[r] + 2 * chi[r, r] + tmp / 2
return fundamental_frequencies
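        # Vectorised equivalent (a sketch of the "Einstein-sum version"
        # mentioned above, assuming chi is the full symmetric nVib x nVib
        # matrix):
        #   diag = np.diag(chi)
        #   fundamentals = w + 2 * diag + 0.5 * (chi.sum(axis=1) - diag)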
def vibRot_constants(self):
"""
Return an array -alpha_k^beta (minus is important).
It contains the components of the vibrational-rotational constants in
1/cm.
According to eq. 12 of Amos/Handy/Jayatilaka (doi:10.1063/1.461259)
"""
        fermi_resonances_overview = self.detect_Fermi_resonances(self.mat_D)
        strong_fermi_resonances = [set(f[0]) for f in fermi_resonances_overview
                                   if f[-1] == "strong"]
def check_fermi(i, j):
if set([i, j]) in strong_fermi_resonances:
return True
# Initialise constants
h = CONST.planck_constant("J*s")
c = CONST.speed_of_light() # m/s
u_to_kg = CONST.atomic_mass_constant() # kg
nVib = self.nVib
nTransRot = self.nTransRot
w = self.harm_freq
cubic = self.cubic
moI = self.geometry.rot_prop.moment_of_inertia_tensor() # u*Angs^2
moI_derivs = self.harmonic.inertia_derivatives() # u^1/2*Angs
# The moI derivative needs to be converted to the unit of cm:
moI_deriv_conv = np.pi * np.sqrt(u_to_kg * c / h) * 1e-9
cz = self.harmonic.coriolis_zeta()
b_e = self.harmonic.rot_const_inv_cm
coriolis_resonances = []
negAlpha = np.zeros((3, nVib, 4), dtype=FLOAT)
# Term 1
for k in range(nVib):
for b in range(3):
for a in range(3):
negAlpha[b, k, 0] += (1.5 * b_e[b]**2 *
moI_derivs[k, a, b]**2 /
(w[k] * moI[a, a]))
# Term 2 and 3
for k in range(nVib):
for b in range(3):
for l in range(nVib):
lz, kz = l + nTransRot, k + nTransRot
if np.abs(w[k] - w[l]) > CORIOLIS_RESONANCE_THRESH:
negAlpha[b, k, 1] += (2 * b_e[b]**2 / w[k] *
cz[b, kz, lz]**2 *
(3 * w[k]**2 + w[l]**2) /
(w[k]**2 - w[l]**2))
else:
coriolis_resonances.append((k, l))
negAlpha[b, k, 2] -= (b_e[b]**2 * cz[b, kz, lz]**2 *
(w[k] - w[l])**2 /
((w[k] + w[l]) * w[k]**2 * w[l]))
# Term 4
for k in range(nVib):
for b in range(3):
for l in range(nVib):
if not check_fermi(l, k):
# it seems that this term needs to be negative when
# compared to cfour (moI_deriv definition?)
# print("{:.9f}".format(moI_derivs[l, b, b]))
negAlpha[b, k, 3] -= (2 * b_e[b]**2 * cubic[k, k, l] *
moI_derivs[l, b, b] *
moI_deriv_conv / w[l]**1.5)
return -negAlpha, coriolis_resonances
def b_0(self, alpha):
"""Return the corrected B_0 values in 1/cm."""
b_e = self.harmonic.rot_const_inv_cm
b_0 = np.zeros((3,), dtype=FLOAT)
for a in range(3):
b_temp = 0.0
for i in range(self.nVib):
b_temp += np.sum(alpha[a, i])
b_0[a] = b_e[a] - 0.5 * b_temp
# print "{:>12,.6f} {:>12,.6f}".format(b_e[a], b_0[a])
return b_0
def generate_state(self, ijk_quanta={}):
"""
Return a list of states of length self.nVib.
Here, at all positions found in ijk_quanta, the respective amount of
quanta is inserted, e.g.:
nVib = 3, ijk_quanta = {1:2, 2:1} --> state = np.array([0, 2, 1]).
"""
state = np.zeros((self.nVib), dtype=np.int16)
for i, quanta in ijk_quanta.items():
state[i] = np.int16(quanta)
return state
    def recursive_states(self, seed, n_quanta, states):
        """Recursively populate states with n_quanta quanta."""
        if n_quanta == 1:
            return states
        new_states = []
        for element in seed:
            new_states += (element + states).tolist()
        states = np.array(new_states)
        n_quanta -= 1
        return self.recursive_states(seed, n_quanta, states)
def generate_excited_states(self, initial_state, n_quantas):
"""
Generate a list of possible excited Vibrational states.
Here we start from an initial_state (constituting excitations
of n_quanta).
"""
        eye = np.eye(self.nVib, dtype=int)
seed = []
pm = np.array([1, -1])
for i in range(self.nVib):
for m in pm:
seed.append((m * eye[i]))
excited_states = []
concatenated = np.concatenate([pm * x for x in n_quantas])
for n_quanta in n_quantas:
for pre_state in self.recursive_states(seed, n_quanta, seed):
if np.sum(pre_state) in concatenated:
excited_state = initial_state + pre_state
if np.min(excited_state) >= 0:
if excited_state.tolist() not in excited_states:
excited_states.append(excited_state.tolist())
return excited_states
def h0vib(self, state_i):
"""
        Return the harmonic energy of a vibrational state.
        I.e. <i|H_0|i>; matrix elements <i|H_0|j> vanish for i != j.
"""
return np.sum((FLOAT(state_i) + 0.5) * self.harm_freq)
def qn_i(self, n, i, n_quanta):
"""Determine pre-factors resulting from the integrations."""
if (n == 3 and n_quanta == 1):
return np.sqrt(9.0 / 8.0 * FLOAT(i + 1)**3)
elif (n == 2 and n_quanta == 0):
return FLOAT(i) + 0.5
elif (n_quanta == n and n_quanta > 0):
q = [FLOAT(i + j) / 2 for j in range(1, n + 1)]
return np.sqrt(np.prod(q))
else:
return 0.0
def h1vib(self, state_i, state_j):
"""Return the energy of the 1st anharm. transition, i.e. <i|H_1|j>."""
h1 = 0.0
if (len(state_i) == self.nVib and len(state_j) == self.nVib):
state_diff = np.abs(state_j - state_i)
nz = np.nonzero(state_diff)[0]
# nz: there could be up to 3 non-zero indices
if np.sum(state_diff) == 3:
if len(nz) == 1:
gs = min(state_i[nz[0]], state_j[nz[0]])
h1 += (self.qn_i(3, gs, 3) *
self.cubic[nz[0], nz[0], nz[0]] / 6)
elif len(nz) == 2:
gs = [min(state_i[nz[0]], state_j[nz[0]]),
min(state_i[nz[1]], state_j[nz[1]])]
if state_diff[nz[0]] == 2:
h1 += (self.qn_i(1, gs[1], 1) *
self.qn_i(2, gs[0], 2) *
self.cubic[nz[0], nz[0], nz[1]] / 2)
elif state_diff[nz[1]] == 2:
h1 += (self.qn_i(1, gs[0], 1) *
self.qn_i(2, gs[1], 2) *
self.cubic[nz[1], nz[1], nz[0]] / 2)
elif len(nz) == 3:
gs = [min(state_i[nz[0]], state_j[nz[0]]),
min(state_i[nz[1]], state_j[nz[1]]),
min(state_i[nz[2]], state_j[nz[2]])]
h1 += (self.qn_i(1, gs[0], 1) *
self.qn_i(1, gs[1], 1) *
self.qn_i(1, gs[2], 1) *
self.cubic[nz[0], nz[1], nz[2]])
elif np.sum(state_diff) == 1:
# print state_diff, state_i, state_j
gs = min(state_i[nz[0]], state_j[nz[0]])
for k in range(self.nVib):
# print k, nz[0], gs
if k == nz[0]:
h1 += (self.qn_i(3, gs, 1) *
self.cubic[nz[0], nz[0], nz[0]] / 6)
else:
h1 += (self.qn_i(1, gs, 1) *
self.qn_i(2, state_i[k], 0) *
self.cubic[nz[0], k, k] / 2)
else:
return 0.0
else:
sys.exit("VibRot.vpt2.h1vib(): "
"len(state_i) != len(state_j) != nVib.")
return h1
def harmonic_VPT2_derivative(self):
"""
Return the D-matrix.
This represents the harmonic derivative of the perturbative corrections
to the fundamental frequencies d (dimensionless) according to
Matthews.
doi: 10.1080/00268970902769463 (equation 3,4)
>> The routine is a bit slow for large systems, check if improvable!
"""
def kron(a, b):
if a == b:
return 1.0
else:
return 0.0
nVib = self.nVib
nTransRot = self.nTransRot
w = self.harm_freq
cz = self.harmonic.coriolis_zeta()[:, nTransRot:, nTransRot:]
b_e = self.harmonic.rot_const_inv_cm
d = np.zeros((nVib, nVib), dtype=FLOAT)
d_0 = np.zeros((nVib), dtype=FLOAT)
state_0 = self.generate_state({})
excited_states = self.generate_excited_states(state_0, [1, 3])
for a in range(nVib):
# d^0_a Term1
for b in range(nVib):
# if a == b, 1 / w[b] - w[b] / w[a]**2 is 0 (no contribution)
coriolis = 0.0
for alpha in range(3):
coriolis += (cz[alpha, a, b])**2 * b_e[alpha]
# print("{:>12.4f}".format(coriolis))
d_0[a] += 0.25 * (1 / w[b] - w[b] / w[a]**2) * coriolis
# d^0_a Term2
for excited_state_k in excited_states:
h1vib_squared = self.h1vib(state_0, excited_state_k)**2
delta_e_ik = (self.h0vib(state_0) -
self.h0vib(excited_state_k))
d_0[a] += (h1vib_squared / delta_e_ik**2) * excited_state_k[a]
# print(d_0)
# sys.exit()
for i in range(nVib):
state_i = self.generate_state({i: 1})
excited_states = self.generate_excited_states(state_i, [1, 3])
for a in range(nVib):
# Term 1
for b in range(nVib):
if a != b:
coriolis = 0.0
for alpha in range(3):
coriolis += (cz[alpha, a, b])**2 * b_e[alpha]
weighting = (kron(i, a) + 0.5) * (kron(i, b) + 0.5)
d[i][a] += (weighting *
(1 / w[b] - w[b] / w[a]**2) * coriolis)
# Term 2
for excited_state_k in excited_states:
h1vib_squared = self.h1vib(state_i, excited_state_k)**2
delta_e_ik = (self.h0vib(state_i) -
self.h0vib(excited_state_k))
if not (h1vib_squared == 0.0 or np.abs(delta_e_ik) < 1e-3):
d[i][a] -= ((kron(i, a) - excited_state_k[a]) *
h1vib_squared / delta_e_ik ** 2)
# Term 3
d[i][a] -= d_0[a]
return d
def zero_point_energy(self, anharmonic_constants):
"""Generate the zero point vibrational energy in 1/cm."""
nVib = self.nVib
nTransRot = self.nTransRot
w = self.harm_freq
cubic = self.cubic
semiquartic = self.semiquartic
cz = self.harmonic.coriolis_zeta()
b_e = self.harmonic.rot_const_inv_cm
harmonic_zpe = 0.5 * np.sum(w)
anharmonic_zpe = 0.0
for i in range(nVib):
for j in range(nVib):
if i >= j:
anharmonic_zpe += 0.25 * anharmonic_constants[i, j]
# Term 1
zpe = -0.25 * np.sum(b_e)
for k in range(nVib):
# Term 2
zpe += semiquartic[k, k, k] / 64.0
# Term 3
zpe += -7.0 * cubic[k, k, k]**2 / (576.0 * w[k])
# Term 4
for l in range(nVib):
if l != k:
zpe += ((3.0 * w[l] * cubic[k, k, l]**2) /
(64.0 * (4.0 * w[k]**2 - w[l]**2)))
# Term 5
for l in range(nVib):
for m in range(nVib):
if (k < l and l < m and k < m):
zpe -= ((cubic[k, l, m]**2 * w[k] * w[l] * w[m]) /
(4.0 * ((w[k] + w[l] + w[m]) *
(w[k] - w[l] - w[m]) *
(w[k] + w[l] - w[m]) *
(w[k] - w[l] + w[m]))))
# Term 6
for l in range(nVib):
if k != l:
lz, kz = l + nTransRot, k + nTransRot
for axis in range(3):
zpe -= 0.125 * b_e[axis] * cz[axis, kz, lz]**2
return zpe + harmonic_zpe + anharmonic_zpe
def detect_Fermi_resonances(self, mat_D):
"""
        Return Fermi resonances detected in an automatic manner.
        It analyses the harmonic derivative of the perturbative corrections to
        the fundamental frequencies (mat_D) and returns the resonant states as
        well as their harmonic frequencies.
Two cases:
strong: w_i ~ 2 w_j (D[i,i] = -X, D[i,j] = 2X)
weak: w_i ~ w_j + w_k (D[i,i] = -X, D[i,j] = X, D[i,k] = X)
"""
cTresh = 1 # np.around threshold
def d_approx(i, j):
return np.around(mat_D[i, i], cTresh)
fermi_resonances = []
# The following returns a list of indexes for which |D[i,i]| > 0.5
relevant_D = np.nonzero(np.greater(np.abs(np.diag(mat_D)), 0.5))[0]
w = self.harm_freq
for i in relevant_D:
d_index = np.nonzero(np.greater(np.abs(mat_D[i]), 0.5))[0]
d_index = [j for j in d_index if i != j]
if not d_index:
continue
if len(d_index) == 1:
# strong Fermi resonance
j = d_index[0]
f = [[i, j], [w[i], w[j]], [{j: 2}, {i: 1}],
[-mat_D[i, i], mat_D[i, j] / 2], "strong"]
fermi_resonances.append(f)
elif len(d_index) == 2:
j, k = d_index[0], d_index[1]
xTest = -d_approx(i, i)
if (xTest == d_approx(i, j) and xTest == d_approx(i, k)):
f = [[i, j, k], [w[i], w[j], w[k]], [{j: 1, k: 1}, {i: 1}],
[-mat_D[i, i], mat_D[i, j], mat_D[i, k]], "weak"]
fermi_resonances.append(f)
else:
sys.exit("VibRot.vpt2.detect_Fermi_resonances(): "
"Matrix D seems too complicated.")
return fermi_resonances
# return []
def effective_hamiltonian(self, anharmonic_const):
"""
Return an effective Hamiltonian constructed from Fermi resonances.
        This is work in progress and not yet usable.
"""
resonances = self.detect_Fermi_resonances(self.mat_D)
zpe_anharm = self.zero_point_energy(anharmonic_const)
for resonance in resonances:
for raw_state in resonance[2]:
state = self.generate_state(raw_state)
h0 = self.h0vib(state)
print(zpe_anharm, h0, h0 - zpe_anharm)
# class VibRotErrors(Exception):
# """Base class for exceptions in this module."""
# def __init__(self, value):
# self.value = value
# def __str__(self):
# return repr(self.value)
|
jdcapa/MolecularToolbox
|
moleculartoolbox/vpt2.py
|
Python
|
gpl-3.0
| 33,026
|
[
"CFOUR",
"ORCA"
] |
1ba5c93ed4fc5d861135aedfe46a12b157aeed4141388c97a35154eaad25efa0
|
# -*- coding: utf-8 -*-
###############################################################################
# #
# Ambrosia - a tool to visualize ANANAS results #
# #
# Copyright (C) 2015 Wolfgang Ettlinger and the ANANAS Team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# the ANANAS Project Copyright (C) 2015 #
# #
###############################################################################
import json
import re
import datetime
import binascii
import socket
import struct
import dateutil.parser
import ambrosia
from ambrosia.plugins import PluginInfoTop
from ambrosia.util import get_logger, join_command
from ambrosia import model, Correlator
from ambrosia.context import AmbrosiaContext
from ambrosia.model.entities import Task, File, App, ServerEndpoint
from ambrosia_plugins.events import ANANASEvent
from ambrosia_plugins.lkm.events import SyscallEvent, CommandExecuteEvent, FileDescriptorEvent, FileAccessEvent, \
SocketEvent, SocketAcceptEvent, MemoryMapEvent, StartTaskEvent, SuperUserRequestEvent, CreateDirEvent, SendSignalEvent, \
DeletePathEvent, ExecEvent, ANANASAdbShellExecEvent, AnonymousFileAccessEvent, UnknownFdEvent, LibraryLoadEvent, JavaLibraryLoadEvent, \
ZygoteForkEvent, APKInstallEvent, MountEvent
__author__ = 'Wolfgang Ettlinger'
class PluginInfo(PluginInfoTop):
@staticmethod
def correlators():
return [
(SyscallCorrelator, 10), # basic correlation
(FileAccessEventCorrelator, 20), # classifies file events
(CommandExecuteCorrelator, 30), # finds command executions
(AdbCommandCorrelator, 40), # correlates command executions with adb commands
(InstallCorelator, 50) # find APK installations
]
@staticmethod
def parsers():
return [LkmPluginParser]
class LkmPluginParser(ambrosia.ResultParser):
"""Parses the *process*, *syscalltrace* and *appinfo* elements of the result set.
"""
def __init__(self):
super(LkmPluginParser, self).__init__()
self.processes = {}
self.log = get_logger(self)
def parse(self, name, el, context):
"""Does the actual parsing.
* *process* element: All processes reported by the LKM/ANANAS are parsed and
:class:`ambrosia_web.model.entities.Task` entities are created. Moreover, the attributes
* *ananas_id* (id in the ANANAS db)
* *parent_id* (the ANANAS db id of the parent task)
            * *comm* (description of the process in the kernel)
* *path* (of the executable)
* *type* (the type of the task ANANAS figured out)
* *fds* (a dict of all file descriptors and the path during LKM load)
            * *tgid* (the PID of the task group leader)
* *tg_leader_id* (The ANANAS db id of the thread group leader)
        * *syscalltrace* element: A :class:`ambrosia_plugins.lkm.events.SyscallEvent` event is created for each syscall
using all the information ANANAS provides. Moreover the :class:`ambrosia_web.clocks.ClockSyncer`.translate_table
attribute is filled. ANANAS records two timestamps for each syscall. There is a *normal* timestamp (which is
the system time when the syscall returned) and the *monotonic* timestamp (which is the time that passed since
the system booted). When the system clock is not changed, the *monotonic* and the *normal* clock are in sync
(e.g. if 10 seconds pass on one clock 10 seconds pass on the other clock). Therefore the *normal* clock is
ahead of the *monotonic* clock (a constant offset = the time the emulator booted). By calculating the
*normal* clock minus the *monotonic* clock we always get this offset. When this offset changes, the system
clock has been altered.
This algorithm is implemented using the following variables:
* boot_time: the actual time the emulator is booted (calculated *normal* - *monotonic* time on the first
syscall = when emulator time and host time are still in sync)
        * error: how much the expected offset (boot_time) is off from the actual offset (*normal* - *monotonic*).
This is also the error of the emulator clock (compared to the host clock)
* adjtime: the adjusted time (the captured *normal* time - error).
        * lasterror: the error of the last syscall. If the error of two consecutive syscalls changes, we know that
the system clock has been altered (and we need to make an entry in
:class:`ambrosia_web.clocks.ClockSyncer`.translate_table). The comparison sees two errors that are at a
maximum of 1 second apart as a clock change. This is because the error is not absolutely precise (the
*monotonic* and *normal* timestamps are not captured at exactly the same time, even a context switch may
happen in between).
* *appinfo* element: A :class:`ambrosia_web.model.entities.App` entity is created for each app in the report.
"""
assert isinstance(context, AmbrosiaContext)
analysis = context.analysis
if name == 'processes':
self.log.info('Parsing process-tag')
for p in el:
props = p.attrib.copy().items()
props += p.find('info').attrib.items()
props = dict(props)
start = end = None
if 'start' in props:
start = dateutil.parser.parse(props['start'])
if 'end' in props:
end = dateutil.parser.parse(props['end'])
props['fds'] = {}
for fdel in p.findall('fds/fd'):
props['fds'][int(fdel.attrib['number'])] = fdel.attrib['path']
proc = analysis.get_entity(context,
Task,
int(props['pid']),
start,
end)
proc.ananas_id = int(props['id'])
proc.parent_id = int(props['parentId'])
proc.comm = json.loads(props['comm'])
# TODO fix double-json in ANANAS
proc.path = json.loads(json.loads(props['path']))
proc.execfiles = set()
for ep in proc.path:
proc.execfiles.add(
analysis.get_entity(
context,
File,
ep
)
)
proc.type = props['type']
proc.fds = props['fds']
if props['tgid'] != 'None':
proc.tgid = int(props['tgid'])
try:
proc.tg_leader_id = int(props['threadgroup-leader'])
except ValueError:
# tg-leader is None
pass
try:
proc.uid = int(props['uid'])
except ValueError:
# uid is 'None'
pass
self.processes[proc.ananas_id] = proc
elif name == 'syscalltrace':
self.log.info('Parsing syscalltrace-tag')
boot_time = None
lasterror = None
idx = 1
for sc in el:
props = sc.attrib.copy().items()
props += sc.find('info').attrib.items()
props = dict(props)
props['returnval'] = int(sc.find('return').text)
props['processid'] = int(props['processid'])
params = []
for param in sc.findall('param'):
if param.text is None:
params.append('')
else:
params.append(param.text)
time = dateutil.parser.parse(props['time'])
props['params'] = params
infos = sc.findall('addinfo')
props['add_info'] = {}
for info in infos:
info_name = info.attrib['name']
if info_name not in props['add_info']:
props['add_info'][info_name] = []
text = info.text
if text is None:
text = ''
props['add_info'][info_name].append(text)
spawned_child = None
if 'child_id' in props:
spawned_child = self.processes[int(props['child_id'])]
target_task = None
if 'target_task_id' in props:
target_task = self.processes[int(props['target_task_id'])]
mt = float(props['monotonic_time'])
# calculate boot_time on first syscall
if boot_time is None:
boot_time = time - datetime.timedelta(0, mt)
error = time - datetime.timedelta(0, mt) - boot_time
adjtime = time - error
if lasterror is None or _timedelta_diff(lasterror, error) > datetime.timedelta(0, 1):
# add 1 for safety to definitely get all events
offset = datetime.timedelta(0, 1)
context.clock_syncer.translate_table.append((time - offset, error))
lasterror = error
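                # Worked example of the offset logic above (hypothetical
                # numbers, not taken from any report): if the first syscall
                # arrives with normal time 10:00:05 and monotonic time 5 s,
                # boot_time becomes 10:00:00 and error is 0. If a later
                # syscall reports normal time 10:03:30 at monotonic 200 s,
                # the offset is 10:00:10, i.e. error = 10 s > 1 s, so a
                # translate_table entry is appended and adjtime becomes
                # 10:03:20.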
syscall_event = SyscallEvent(context,
props,
adjtime,
mt,
self.processes[props['processid']],
idx,
spawned_child,
target_task)
idx += 1
analysis.add_event(syscall_event)
elif name == 'appinfo':
self.log.info('Parsing appinfo-tag')
for ap in el:
appinfo = analysis.get_entity(context, App, unicode(ap.attrib['package']))
appinfo.uid = int(ap.attrib['uid'])
appinfo.apk_path = unicode(ap.attrib['apk-path'])
appinfo.native_lib_path = unicode(ap.attrib['native-lib-path'])
appinfo.version = unicode(ap.attrib['version'])
def finish(self, context):
"""Calculate additional information for each process.
This method is executed after all processes have been parsed. This allows to reliably reference other processes
        (E.g. when the first process is being parsed no other process is known, therefore no other process can be
referenced). The method sets the tg_leader and the parent. Moreover, it copies the reference to *fds* from the
parent for all threads (in linux a thread *normally* shares FDs with its thread group leader).
"""
appuids = {}
for app in context.analysis.iter_entities(context, App):
if app.uid not in appuids:
appuids[app.uid] = set()
appuids[app.uid].add(app)
for ananas_id, proc in self.processes.iteritems():
assert isinstance(proc, Task)
if proc.parent_id != -1:
proc.parent = self.processes[proc.parent_id]
if proc.tg_leader_id is not None:
proc.tg_leader = self.processes[proc.tg_leader_id]
if not proc.is_process:
                # threads do not have any files
assert len(proc.fds) == 0
if proc.tg_leader is not None:
proc.fds = proc.tg_leader.fds
else:
if len(proc.fds) == 0 and proc.type != 'KERNEL' and not proc.start_captured:
# non-kernel processes with no FDs but that existed
# during fd-listing are strange
self.log.warn("Process {} does not have any FDs".format(proc))
if proc.uid in appuids:
proc.apps = appuids[proc.uid]
for ananas_id, proc in self.processes.iteritems():
assert proc.tg_leader is None or proc.tg_leader.is_process
def _timedelta_diff(td1, td2):
assert isinstance(td1, datetime.timedelta)
assert isinstance(td2, datetime.timedelta)
if td1 < td2:
return td2 - td1
else:
return td1 - td2
class SyscallCorrelator(ambrosia.Correlator):
"""Wraps primitive events into higher-level events
"""
def __init__(self, context):
assert isinstance(context, AmbrosiaContext)
super(SyscallCorrelator, self).__init__(context)
self.fd_directory = {}
self._generate_start_fd_directory()
def _generate_start_fd_directory(self):
"""Generates the initial fd directory.
        Before the correlation is started, the fd directory is filled with file descriptor events of processes that
existed before the LKM was loaded.
"""
for proc in self.context.analysis.iter_entities(self.context, Task):
assert isinstance(proc, Task)
if proc.start_captured:
# process did not exist on lkm load -> no fd listing available
continue
fds = {}
for fd, path in proc.fds.iteritems():
if path.startswith('socket:'):
new_event = SocketEvent(proc, True)
elif path.startswith('anon_inode:') or path.startswith('pipe:'):
new_event = AnonymousFileAccessEvent(path, proc, self.context)
elif path.startswith('/'):
if path.endswith(' (deleted)'):
# kernel appends ' (deleted)' for deleted files
path = path[0:-10]
new_event = FileAccessEvent(
self.context.analysis.get_entity(
self.context,
File,
path),
None,
None,
proc,
True)
else:
self.log.warn('Unknown path: "{}"'.format(path))
continue
fds[fd] = new_event
self.fd_directory[proc] = fds
def _is_success(self, val):
return val >= 0 or val == -115
# -115: EINPROGRESS
def _get_fd_event(self, fd, process, success, logname, clazz=None, default_start_ts=None):
"""Get an fd event from the a fd directory entry.
The fd directory (`fd_directory`) is a dict in the form of
.. code-block:: python
{
pid: {
fd_number: fd_event,
...
},
...
}
The fd directory represents all file descriptors of the emulator **at a specific point in time**. This means
that the fd directory is constantly changed as syscalls are being processed (e.g. open() creates an entry, close
removes an entry).
If (for some reason) the fd is not found, this method returns an
:class:`ambrosia_plugins.lkm.events.UnknownFdEvent`.
Note:
            One value of the fd dictionary may be stored under multiple pid keys since tasks (especially threads)
may share file descriptors.
Args:
fd (int): the file descriptor number we are searching for
process (ambrosia_web.model.entities.Task): the task the fd belongs to
clazz (class): (optional) only return an event of this type
default_start_ts (datetime.datetime): if this fd is unknown, return an event with this start timestamp
"""
assert isinstance(process, Task)
assert isinstance(logname, basestring)
proc_fds = self.fd_directory[process]
if fd not in proc_fds:
if success:
self.log.warn("{} operation on unknown fd, process {}, fd {}".format(logname, process, fd))
fdevt = UnknownFdEvent(process, fd, success)
if default_start_ts is not None:
fdevt.start_ts = default_start_ts
proc_fds[fd] = fdevt
self.to_add.add(fdevt)
res = proc_fds[fd]
if clazz is not None:
if not isinstance(res, clazz):
return
return res
def _get_del_fd_event(self, fd, process, success, logname, clazz=None):
"""Gets an fd event from the fd directory and deletes it.
Args:
fd (int): the file descriptor number we are searching for
process (ambrosia_web.model.entities.Task): the task the fd belongs to
clazz (class): (optional) only return an event of this type
"""
proc_fds = self.fd_directory[process]
evt = self._get_fd_event(fd, process, success, logname, clazz)
if evt is None:
return
del proc_fds[fd]
return evt
def _get_dup(self, evt, oldfd, newfd, process):
"""Duplicate an fd (dup and dup2 syscalls)
Args:
evt (ambrosia_web.model.Event): the dup syscall event
oldfd (int): the old file descriptor number
newfd (int): the new file descriptor number
"""
assert isinstance(evt, model.Event)
proc_fds = self.fd_directory[process]
success = self._is_success(evt.returnval)
if oldfd not in proc_fds:
if success:
self.log.warn("dup on an unknown fd, process {}, fd {}".format(process, oldfd))
fdevt = UnknownFdEvent(process, oldfd, success)
proc_fds[oldfd] = fdevt
self.to_add.add(fdevt)
fevt = proc_fds[oldfd]
if success:
proc_fds[newfd] = fevt
return fevt
def correlate(self):
self.log.info('Generating events from syscalls')
for evt in self.context.analysis.iter_events(self.context, cls=SyscallEvent, key='index'):
self._check_syscall(evt)
self.update_tree()
def _parse_addr_str(self, addrstr, socket_evt):
"""Parse the struct sockaddr structure passed to bind and connect syscalls.
Args:
addrstr (str): the hexascii representation of the struct sockaddr
            socket_evt (ambrosia_plugins.lkm.events.SocketEvent): the socket event the struct should be parsed for
Returns:
an :class:`ambrosia.model.Entity` that represents the address
"""
assert isinstance(socket_evt, SocketEvent)
raw = binascii.unhexlify(addrstr)
sa_family = struct.unpack("<H", raw[0:2])[0]
assert socket_evt.address_family == sa_family
entity = None
if sa_family == 1: # AF_UNIX # TODO
# struct sockaddr_un
address = raw[2:].rstrip('\x00')
if address[0] == '\x00':
entity = self.context.analysis.get_entity(
self.context,
ServerEndpoint,
'unix',
address,
0)
else:
entity = self.context.analysis.get_entity(self.context, File, address)
elif sa_family == 2: # AF_INET TODO
# sockaddr_in
port = struct.unpack("<H", raw[2:4])[0]
addr = socket.inet_ntoa(raw[4:8])
if socket_evt.socket_type == 1: # TODO SOCK_STREAM
protocol = 'tcp'
elif socket_evt.socket_type == 2: # TODO SOCK_DGRAM
protocol = 'udp'
else:
protocol = 'unknown'
entity = self.context.analysis.get_entity(
self.context,
ServerEndpoint,
protocol,
addr,
port)
elif sa_family == 10: # TODO AF_INET6
# sockaddr_in6
port = struct.unpack("<H", raw[2:4])[0]
addr = socket.inet_ntop(socket.AF_INET6, raw[4:20])
if socket_evt.socket_type == 1: # TODO SOCK_STREAM
protocol = 'tcp6'
elif socket_evt.socket_type == 2: # TODO SOCK_DGRAM
protocol = 'udp6'
else:
protocol = 'unknown'
entity = self.context.analysis.get_entity(
self.context,
ServerEndpoint,
protocol,
addr,
port)
return entity
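        # Illustrative example (hypothetical input, as parsed by this routine):
        # for socket_type == 1 and address_family == 2 the hexascii string
        # "020050007f000001" decodes to sa_family = 2 (AF_INET), port = 80 and
        # address "127.0.0.1", yielding the ServerEndpoint ('tcp', '127.0.0.1', 80).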
def _create_fd_dir_entry(self, proc):
if proc.tg_leader in self.fd_directory:
# threadgroup is known -> fds are inherited
self.fd_directory[proc] = self.fd_directory[proc.tg_leader]
elif proc.is_process and proc.parent in self.fd_directory:
# its a new process and parent is known -> copy fd table
self.fd_directory[proc] = self.fd_directory[proc.parent].copy()
else:
self.log.warn("task without known threadgroup or parent: {}".format(proc))
self.fd_directory[proc] = {}
def _check_syscall(self, evt):
"""Wraps a single syscall event into a higher-level event
Args:
evt (ambrosia_plugins.lkm.events.SyscallEvent): the syscall event
"""
assert isinstance(evt, SyscallEvent)
proc = evt.process
parent_evt = None
assert isinstance(proc, Task)
if proc not in self.fd_directory:
# we have a syscall but the fork() has not yet returned in the parent
# since the parent is currently in the middle of a fork() this should be a good time to copy the fd
# directory
self._create_fd_dir_entry(proc)
proc_fds = self.fd_directory[proc]
if evt.name == "open" or evt.name == "creat":
if evt.name == "creat":
flags = 0
mode = int(evt.params[1])
else:
flags = int(evt.params[1])
mode = int(evt.params[2])
parent_evt = FileAccessEvent(
self.context.analysis.get_entity(
self.context,
File,
evt.params[0]),
flags,
mode,
proc,
self._is_success(evt.returnval))
if parent_evt.successful:
proc_fds[evt.returnval] = parent_evt
self.to_add.add(parent_evt)
elif evt.name == "epoll_create" or evt.name == "epoll_create1":
parent_evt = AnonymousFileAccessEvent("epoll", proc, self.context, self._is_success(evt.returnval))
if parent_evt.successful:
proc_fds[evt.returnval] = parent_evt
self.to_add.add(parent_evt)
elif evt.name == "socket":
parent_evt = SocketEvent(
proc,
self._is_success(evt.returnval))
parent_evt.address_family = int(evt.params[0])
parent_evt.socket_type = int(evt.params[1])
if parent_evt.successful:
proc_fds[evt.returnval] = parent_evt
self.to_add.add(parent_evt)
elif evt.name == "pipe" or evt.name == "pipe2":
fd1 = int(evt.params[0])
fd2 = int(evt.params[1])
parent_evt = AnonymousFileAccessEvent('pipe', proc, self.context,
self._is_success(evt.returnval) and fd1 >= 0 and fd2 >= 0)
if parent_evt.successful:
proc_fds[fd1] = parent_evt
proc_fds[fd2] = parent_evt
self.to_add.add(parent_evt)
elif evt.name == "accept":
parent_evt = SocketAcceptEvent(
proc,
self._is_success(evt.returnval))
mainsocket = self._get_fd_event(int(evt.params[0]), proc, parent_evt.successful, "accept")
if parent_evt.successful:
proc_fds[evt.returnval] = parent_evt
assert isinstance(mainsocket, SocketEvent)
mainsocket.server_socket = True
mainsocket.add_child(parent_evt)
self.to_add.add(mainsocket)
elif evt.name == "connect":
parent_evt = self._get_fd_event(int(evt.params[0]), proc, self._is_success(evt.returnval), "connect")
if self._is_success(evt.returnval) and isinstance(parent_evt, SocketEvent):
parent_evt.connected_to = self._parse_addr_str(evt.params[1], parent_evt)
parent_evt.client_socket = True
elif evt.name == "bind":
parent_evt = self._get_fd_event(int(evt.params[0]), proc, self._is_success(evt.returnval), "bind")
if self._is_success(evt.returnval):
assert isinstance(parent_evt, SocketEvent)
parent_evt.bound_to = self._parse_addr_str(evt.params[1], parent_evt)
parent_evt.server_socket = True
elif evt.name == "listen":
parent_evt = self._get_fd_event(int(evt.params[0]), proc, self._is_success(evt.returnval), "listen")
elif evt.name == "fchown32":
parent_evt = self._get_fd_event(int(evt.params[0]), proc, self._is_success(evt.returnval), "fchown32")
elif evt.name == "read" or \
evt.name == "write" or \
evt.name == "send" or \
evt.name == "sendto" or \
evt.name == "sendmsg" or \
evt.name == "recvfrom" or \
evt.name == "recvmsg":
parent_evt = self._get_fd_event(int(evt.params[0]), proc, self._is_success(evt.returnval), evt.name)
elif evt.name == "close":
parent_evt = self._get_del_fd_event(int(evt.params[0]), proc, self._is_success(evt.returnval), "close")
elif evt.name == "dup":
parent_evt = self._get_dup(evt, int(evt.params[0]), evt.returnval, proc)
elif evt.name == "dup2":
parent_evt = self._get_dup(evt, int(evt.params[0]), int(evt.params[1]), proc)
elif evt.name == "mmap2":
fd = int(evt.params[4])
flags = int(evt.params[3])
address = int(evt.returnval)
parent_evt = MemoryMapEvent(flags, fd, address, proc, evt.returnval, evt.end_ts, evt.end_ts)
if 'MAP_ANONYMOUS' not in parent_evt.flags:
fdevt = self._get_fd_event(fd, proc, parent_evt.successful, "mmap2", FileDescriptorEvent, evt.start_ts)
fdevt.add_child(parent_evt)
else:
self.to_add.add(parent_evt)
elif evt.name == "clone" or evt.name == "fork" or evt.name == "vfork":
if evt.returnval < 0:
return
pid = evt.returnval
if pid < 0:
pid = None
assert evt.spawned_child.start_captured
parent_evt = StartTaskEvent(evt.end_ts, evt.end_ts, proc, pid, evt.spawned_child)
self.to_add.add(parent_evt)
if evt.spawned_child not in self.fd_directory:
# the process hasn't done any syscalls (is not a "ghost process")
self._create_fd_dir_entry(evt.spawned_child)
elif evt.name == "execve":
parent_evt = ExecEvent(evt.start_ts, evt.end_ts, evt.params[0], evt.argv, evt.env, proc)
self.to_add.add(parent_evt)
elif evt.name == "unlink" or evt.name == "rmdir":
parent_evt = DeletePathEvent(evt.start_ts,
evt.end_ts,
self._is_success(evt.returnval),
self.context.analysis.get_entity(
self.context,
File,
evt.params[0]),
proc)
self.to_add.add(parent_evt)
elif evt.name == "mkdir":
parent_evt = CreateDirEvent(evt.start_ts,
evt.end_ts,
proc,
self._is_success(evt.returnval),
self.context.analysis.get_entity(
self.context,
File,
evt.params[0]))
self.to_add.add(parent_evt)
elif evt.name == "kill" or evt.name == "tgkill":
parent_evt = SendSignalEvent(evt.start_ts,
evt.end_ts,
int(evt.params[1]),
proc,
evt.target_task)
self.to_add.add(parent_evt)
elif evt.name == "mount":
parent_evt = MountEvent(
self.context.analysis.get_entity(
self.context,
File,
evt.params[0]),
self.context.analysis.get_entity(
self.context,
File,
evt.params[1]),
evt.params[2],
evt.params[3],
evt.params[3],
proc,
self._is_success(evt.returnval))
self.to_add.add(parent_evt)
# TODO exit
if parent_evt is not None:
assert isinstance(parent_evt, model.Event)
parent_evt.add_child(evt)
self.to_remove.add(evt)
class FileAccessEventCorrelator(Correlator):
"""
Finds library load events (mmap to \*.so files) and Java library loads
"""
def correlate(self):
for fe in self.context.analysis.iter_events(self.context, FileAccessEvent):
if re.match('^/vendor/lib/.+\.so', fe.abspath) or re.match('^/system/lib/.+\.so', fe.abspath):
lle = LibraryLoadEvent(fe.file, fe.process, False)
if fe.successful:
for c in fe.children:
if isinstance(c, MemoryMapEvent):
# successful library loads need a mmap
lle.successful = True
break
lle.add_child(fe)
self.to_add.add(lle)
self.to_remove.add(fe)
elif re.match('.+\.(jar|odex|apk)$', fe.abspath) and fe.flags_val == 131072:
system_library_load = (
bool(re.match('^/system/framework/.+\.(jar|odex)', fe.abspath))
or bool(re.match('^/system/(priv-)?app/.+\.(apk|odex)', fe.abspath)))
jll = JavaLibraryLoadEvent(fe.file, fe.process, False, system_library_load)
jll.add_child(fe)
self.to_add.add(jll)
self.to_remove.add(fe)
self.update_tree()
class CommandExecuteCorrelator(Correlator):
"""Finds events that form the execution of a command.
* :class:`ambrosia_plugins.lkm.events.StartTaskEvent`: indicate the creation of a new process
* :class:`ambrosia_plugins.lkm.events.ExecEvent`: commands are started using a fork-and-exec
    * :class:`ambrosia_plugins.lkm.events.LibraryLoadEvent`: shortly after a fork indicates that a library is loaded that
is essential to run the command.
    * :class:`ambrosia_plugins.lkm.events.FileAccessEvent`: several file events happen at the beginning of a command execution
"""
def correlate(self):
self.log.info('Searching for command executions')
for fork in self.context.analysis.iter_events(self.context, StartTaskEvent):
exec_ = None
mintimediff = None
execs_to_add = set()
if not fork.is_process:
continue
exes = list(self.context.analysis.iter_events(self.context, ExecEvent, 'process',
value=fork.spawned_child))
for exe in exes:
assert isinstance(exe, ExecEvent)
timediff = exe.end_ts - fork.end_ts
if mintimediff is None or timediff < mintimediff:
mintimediff = timediff
exec_ = exe
if exec_ is not None and mintimediff < datetime.timedelta(0, 0, 0, 1000):
                # a fork-and-exec should not take longer than 1000 ms
                # find additional execs: search the whole $PATH for the actual executable
lastexe = exec_
for exe in exes:
timediff = exe.end_ts - exec_.end_ts
if timediff < datetime.timedelta(0, 0, 0, 500):
execs_to_add.add(exe)
if exe.end_ts > lastexe.end_ts:
lastexe = exe
# we use the argv of the first execve. e.g. sh -c 'xxx' instead of xxx
cmd_evt = CommandExecuteEvent(
lastexe.path,
exec_.argv,
fork.spawned_child,
self.context.analysis.get_entity(
self.context,
File,
lastexe.path))
cmd_evt.add_child(fork)
self.to_remove.add(fork)
for e in execs_to_add:
cmd_evt.add_child(e)
self.to_remove.add(e)
self.log.debug("Found command event: {}".format(cmd_evt))
if cmd_evt.path == '/system/xbin/su':
su_evt = SuperUserRequestEvent(cmd_evt.start_ts, cmd_evt.end_ts, cmd_evt.process)
su_evt.add_child(cmd_evt)
self.to_add.add(su_evt)
self.log.debug("Found SU event: {}".format(su_evt))
else:
self.to_add.add(cmd_evt)
self._find_file_events(fork.spawned_child,
cmd_evt,
fork.start_ts,
lambda fe:
fe.abspath == '/proc/mounts' or
fe.abspath == '/proc/filesystems' or
fe.abspath == '/' or
re.match('/acct/uid/\d+/tasks', fe.abspath) or
fe.abspath == '/proc/' + str(fork.spawned_child.pid) + '/oom_adj')
self._find_mkdir_events(fork.spawned_child, cmd_evt, fork.start_ts)
self._find_library_loads(fork.spawned_child, cmd_evt, fork.start_ts)
self._find_java_library_loads(fork.spawned_child, cmd_evt, fork.start_ts)
elif fork.process.type == "ZYGOTE" and (fork.spawned_child.type == "ZYGOTE_CHILD"
or fork.spawned_child.type == "TARGET_APP"):
zfe = ZygoteForkEvent(fork.spawned_child)
zfe.add_child(fork)
self.to_remove.add(fork)
self._find_file_events(fork.spawned_child,
zfe,
fork.start_ts,
lambda fe:
re.match('/acct/uid/\d+/tasks', fe.abspath) or
fe.abspath == "/dev/cpuctl/apps/tasks" or
fe.abspath == "/dev/cpuctl/apps/bg_non_interactive/tasks" or
fe.abspath == "/sys/qemu_trace/process_name" or
fe.abspath == "/dev/binder")
self._find_mkdir_events(fork.spawned_child, zfe, fork.start_ts)
self.to_add.add(zfe)
self.update_tree()
def _find_file_events(self, process, evt, start_ts, matches):
for fe in self.context.analysis.iter_events(self.context, FileAccessEvent, 'process', value=process):
assert isinstance(fe, FileAccessEvent)
if (fe.start_ts - start_ts) > datetime.timedelta(0, 2):
# we consider everything within 2 seconds as startup
continue
if matches(fe):
# startup stuff
evt.add_child(fe)
self.to_remove.add(fe)
def _find_mkdir_events(self, process, evt, start_ts):
for mde in self.context.analysis.iter_events(self.context, CreateDirEvent, 'process', value=process):
assert isinstance(mde, CreateDirEvent)
if (mde.start_ts - start_ts) > datetime.timedelta(0, 2):
# we consider everything within 2 seconds as startup
continue
if mde.file.abspath == "/acct/uid/"+str(process.uid):
evt.add_child(mde)
self.to_remove.add(mde)
def _find_library_loads(self, process, evt, start_ts):
for lle in self.context.analysis.iter_events(self.context, LibraryLoadEvent, 'process', value=process):
if (lle.start_ts - start_ts) > datetime.timedelta(0, 2):
# we consider everything within 2 seconds as startup
continue
evt.add_child(lle)
self.to_remove.add(lle)
def _find_java_library_loads(self, process, evt, start_ts):
for jlle in self.context.analysis.iter_events(self.context, JavaLibraryLoadEvent, 'process', value=process):
if (jlle.start_ts - start_ts) > datetime.timedelta(0, 2):
# we consider everything within 2 seconds as startup
continue
evt.add_child(jlle)
self.to_remove.add(jlle)
class AdbCommandCorrelator(Correlator):
"""Find command executions that happen because of ANANAS (through ADB)
"""
def correlate(self):
self.log.info('Correlating ADB commands with command executions')
found_matches = {}
for cmd_evt in self.context.analysis.iter_events(self.context, CommandExecuteEvent):
if ['/system/bin/sh', '-c'] != cmd_evt.command[:2]:
# adb commands are started using /system/bin/sh
continue
if cmd_evt.process.type != 'ADBD_CHILD':
continue
cmd_str = cmd_evt.command[2]
for adb_cmd in self.context.analysis.iter_events(self.context, ANANASEvent, 'start_ts',
min_value=cmd_evt.start_ts - datetime.timedelta(0, 1)):
if adb_cmd.name != 'adb_cmd':
continue
if adb_cmd in found_matches:
continue
if 'shell' in adb_cmd.params:
idx = adb_cmd.params.index('shell')
cmd = adb_cmd.params[idx+1:]
if len(cmd) > 1:
cmd = join_command(cmd)
else:
cmd = cmd[0]
if cmd == cmd_str:
found_matches[adb_cmd] = cmd_evt
break
for adb_cmd, cmd_evt in found_matches.iteritems():
self.to_remove.add(adb_cmd)
self.to_remove.add(cmd_evt)
ase = ANANASAdbShellExecEvent(cmd_evt.process)
ase.add_child(adb_cmd)
ase.add_child(cmd_evt)
self.log.debug("Found ANANAS shell exec: {}".format(cmd_evt))
self.to_add.add(ase)
'''
for evt in self.context.analysis.iter_all_events(self.context, 'process', value=cmd_evt.process):
if evt == cmd_evt:
continue
self.to_remove.add(evt)
ase.add_child(evt)
'''
self.update_tree()
class InstallCorelator(Correlator):
""" Finds app installations
"""
def correlate(self):
self.log.info('Correlating App installations')
for cmd_evt in self.context.analysis.iter_events(self.context, CommandExecuteEvent):
if ['/system/bin/sh', '-c'] != cmd_evt.command[:2]:
                # TODO can an app be installed differently?
continue
cmd_str = cmd_evt.command[2]
if not cmd_str.startswith('pm install'):
continue
apk = cmd_str.split(' ')[2]
if '/system/bin/pm' not in cmd_evt.process.path:
continue
self.to_remove.add(cmd_evt)
ai = APKInstallEvent(self.context.analysis.get_entity(self.context, File, apk),
cmd_evt.process)
ai.add_child(cmd_evt)
self.log.debug("Found APK installation: {}".format(cmd_evt))
self.to_add.add(ai)
for evt in self.context.analysis.iter_all_events(self.context, 'process', value=cmd_evt.process):
if evt == cmd_evt:
continue
self.to_remove.add(evt)
ai.add_child(evt)
self.update_tree()
|
MalwareLabHagenberg/ambrosia
|
ambrosia_plugins/lkm/__init__.py
|
Python
|
gpl-3.0
| 42,929
|
[
"ASE"
] |
6cf661440d8895960086e96b30782cb026bd3dda5ef7564f5734b27f017aee24
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is therefore better to use a
robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
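# Note: the `mahalanobis` methods used below return *squared* distances,
#   d^2(x) = (x - mu)^T Sigma^{-1} (x - mu),
# which is why the contour plots take np.sqrt() of the returned values.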
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
# #############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(np.full(n_samples - n_outliers, 1.26),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(np.full(n_outliers, 2.26),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(np.full(n_samples - n_outliers, 1.26),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(np.full(n_outliers, 2.26),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
chrsrds/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
Python
|
bsd-3-clause
| 6,228
|
[
"Gaussian"
] |
ccf26a44d087b11b7032fd0ca8f5dca9d5af1ddc7799a81b9524edfa3e0efc7e
|
from math import exp, sqrt
import numpy as np
from numpy.random import default_rng
from pysisyphus.constants import KBAU
"""
[1] https://aip.scitation.org/doi/10.1063/1.2408420
[2] https://dx.doi.org/10.1016/j.cpc.2008.01.006
Reformulation of the algorithm. This is implemented, e.g., in YAFF.
https://github.com/molmod/yaff/blob/master/yaff/sampling/nvt.py
csvr_closure() is based on the implementation provided on Bussi's homepage:
https://sites.google.com/site/giovannibussi/downloads/resamplekin.tgz
(At least in my implementation) there seems to be a problem with
the conserved quantity, which is not conserved at all ...
csvr_closure_2() is based on [2]
"""
RNG = default_rng()
def sum_noises(num, rng=None):
"""
Parameters
----------
num : int
Number of independent Gaussian noises to be squared.
rng : numpy.random.Generator, optional
        Instance of a random number generator (RNG). If it is not provided, the
        module-level RNG will be used.
"""
if rng is None:
rng = RNG
if num == 0:
sum_ = 0.0
elif num == 1:
sum_ = rng.normal()**2
# nn even, dof - 1 odd
elif (num % 2) == 0:
sum_ = 2.0 * rng.gamma(shape=num/2)
# nn odd, dof - 1 even
else:
sum_ = 2.0 * rng.gamma(shape=(num-1)/2) + rng.normal()**2
return sum_
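# Note (added illustration): sum_noises(k) amounts to a single draw from a
# chi-square distribution with k degrees of freedom, built from gamma variates
# plus at most one squared standard normal, e.g.
#   sum_noises(5)  ~  2*Gamma(shape=2) + N(0, 1)**2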
def csvr_closure(sigma, dof, dt, tau=100, rng=None):
"""
Parameters
----------
sigma : float
Target average value of the kinetic energy (1/2 dof k_b T) in the same units as
cur_kinetic_energy.
dof : int
Degrees of freedom.
    dt : float
        Timestep.
    tau : float
        Time constant of the thermostat, in the same units as dt.
rng : numpy.random.Generator, optional
        Instance of a random number generator (RNG). If it is not provided, the
        module-level RNG will be used.
"""
# Relaxation time of the thermostat in units of "how often this routine
# is called" (dt / timeconstant).
tau_t = dt / tau
if tau_t > 0.1:
factor = exp(-1.0 / tau_t)
else:
factor = 0.0
if rng is None:
rng = RNG
def resample_kin(cur_kinetic_energy):
"""
Parameters
----------
cur_kinetic_energy : float
Present value of the kinetic energy of the atoms to be thermalized
in arbitrary units.
"""
rr = rng.normal()
new_kinetic_energy = (
cur_kinetic_energy
+ (1.0 - factor)
* (sigma * (sum_noises(dof-1) + rr**2) / dof - cur_kinetic_energy)
+ 2.0 * rr * sqrt(cur_kinetic_energy * sigma / dof * (1.0 - factor) * factor)
)
alpha = sqrt(new_kinetic_energy / cur_kinetic_energy)
return alpha
return resample_kin
def csvr_closure_2(sigma, dof, dt, tau=100, rng=None):
if rng is None:
rng = RNG
c = exp(-dt / tau)
def resample_kin(cur_kinetic_energy):
"""Canonical stocastical velocity rescaling.
See dx.doi.org/10.1016/j.cpc.2008.01.006
"""
R = rng.normal()
S = np.sum(rng.normal(size=dof-1)**2)
quot = (1 - c) * sigma / (dof * cur_kinetic_energy)
alpha = sqrt(c + quot * (S + R**2) + 2 * R * sqrt(c*quot))
sign = np.sign(R + sqrt(c / quot))
return sign * alpha
return resample_kin
def berendsen_closure(sigma, dof, dt, tau=100, rng=None):
""" https://doi.org/10.1063/1.448118"""
tau_t = dt / tau
def resample_kin(cur_kinetic_energy):
alpha = sqrt(1 + tau_t * (sigma / cur_kinetic_energy - 1))
return alpha
return resample_kin
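# Minimal usage sketch (hypothetical, not part of pysisyphus): rescaling the
# velocities of a toy system once with the CSVR thermostat. All numbers below
# (atom count, masses, timestep, temperature) are made-up example values.
if __name__ == "__main__":
    n_atoms = 8
    dof = 3 * n_atoms
    temperature = 300.0                          # K
    sigma = 0.5 * dof * KBAU * temperature       # target kinetic energy, Hartree
    dt = 41.341                                  # ~1 fs in atomic time units
    resample = csvr_closure(sigma, dof, dt)
    rng = default_rng(0)
    velocities = rng.normal(scale=1e-3, size=(n_atoms, 3))
    masses = np.full(n_atoms, 1837.0)            # ~1 amu in electron masses
    e_kin = 0.5 * np.sum(masses[:, None] * velocities**2)
    alpha = resample(e_kin)                      # velocity rescaling factor
    velocities *= alpha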
|
eljost/pysisyphus
|
pysisyphus/dynamics/thermostats.py
|
Python
|
gpl-3.0
| 3,678
|
[
"Gaussian"
] |
97fccb1b552f78ef503d0ddd3f92b79acfed12a34e51bb7281573200f9c89e71
|
########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts.
########################################################################
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('../../../Demos/util')
import rdesigneur as rd
import moogli
PI = 3.14159265359
useGssa = True
combineSegments = True
# Pick your favourite cell here.
#elecFileName = "ca1_minimal.p"
## Cell morphology from Bannister and Larkman J Neurophys 2015/NeuroMorpho
elecFileName = "h10.CNG.swc"
#elecFileName = "CA1.morph.xml"
#elecFileName = "VHC-neuron.CNG.swc"
synSpineList = []
synDendList = []
probeInterval = 0.1
probeAmplitude = 1.0
tetanusFrequency = 100.0
tetanusAmplitude = 1000
tetanusAmplitudeForSpines = 1000
frameRunTime = 1e-3 # 1 ms
baselineTime = 0.05
tetTime = 0.01
postTetTime = 0.01
runtime = baselineTime + tetTime + postTetTime
def buildRdesigneur():
'''
##################################################################
# Here we define which prototypes are to be loaded in to the system.
# Each specification has the format
# source [localName]
# source can be any of
# filename.extension, # Identify type of file by extension, load it.
# function(), # func( name ) builds object of specified name
# file.py:function() , # load Python file, run function(name) in it.
# moose.Classname # Make obj moose.Classname, assign to name.
# path # Already loaded into library or on path.
# After loading the prototypes, there should be an object called 'name'
# in the library.
##################################################################
'''
cellProto = [ [ "./cells/" + elecFileName, "elec" ] ]
chanProto = [
['./chans/hd.xml'], \
['./chans/kap.xml'], \
['./chans/kad.xml'], \
['./chans/kdr.xml'], \
['./chans/na3.xml'], \
['./chans/nax.xml'], \
['./chans/CaConc.xml'], \
['./chans/Ca.xml'], \
['./chans/NMDA.xml'], \
['./chans/Glu.xml'] \
]
spineProto = [ \
['makeSpineProto()', 'spine' ]
]
chemProto = []
##################################################################
# Here we define what goes where, and any parameters. Each distribution
# has the format
# protoName, path, field, expr, [field, expr]...
# where
# protoName identifies the prototype to be placed on the cell
# path is a MOOSE wildcard path specifying where to put things
# field is the field to assign.
# expr is a math expression to define field value. This uses the
# muParser. Built-in variables are:
# p, g, L, len, dia, maxP, maxG, maxL.
# where
# p = path distance from soma, threaded along dendrite
# g = geometrical distance from soma (shortest distance)
# L = electrotonic distance from soma: number of length constants
# len = length of dendritic compartment
# dia = diameter of dendritic compartment
# maxP = maximal value of 'p' for the cell
# maxG = maximal value of 'g' for the cell
# maxL = maximal value of 'L' for the cell
#
# The muParser provides most math functions, and the Heaviside
# function H(x) = 1 for x > 0 is also provided.
##################################################################
passiveDistrib = [
[ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5", \
"Em", "-58e-3", "initVm", "-65e-3" ], \
[ ".", "#axon#", "RA", "0.5" ] \
]
chanDistrib = [ \
["hd", "#dend#,#apical#", "Gbar", "5e-2*(1+(p*3e4))" ], \
["kdr", "#", "Gbar", "p < 50e-6 ? 500 : 100" ], \
["na3", "#soma#,#dend#,#apical#", "Gbar", "250" ], \
["nax", "#soma#,#axon#", "Gbar", "1250" ], \
["kap", "#axon#,#soma#", "Gbar", "300" ], \
["kap", "#dend#,#apical#", "Gbar", \
"300*(H(100-p*1e6)) * (1+(p*1e4))" ], \
["Ca_conc", "#soma#,#dend#,#apical#", "tau", "0.0133" ], \
["kad", "#soma#,#dend#,#apical#", "Gbar", \
"300*H(p - 100e-6)*(1+p*1e4)" ], \
["Ca", "#dend#,#apical#", "Gbar", "p<160e-6? 10+ p*0.25e-6 : 50" ], \
["Ca", "#soma#", "Gbar", "10" ], \
["glu", "#dend#,#apical#", "Gbar", "200*H(p-200e-6)" ], \
["NMDA", "#dend#,#apical#", "Gbar", "2*H(p-200e-6)" ] \
]
spineDistrib = [ \
["spine", '#apical#', "spineSpacing", "20e-6", \
"spineSpacingDistrib", "2e-6", \
"angle", "0", \
"angleDistrib", str( 2*PI ), \
"size", "1", \
"sizeDistrib", "0.5" ] \
]
chemDistrib = []
######################################################################
# Here we define the mappings across scales. Format:
# sourceObj sourceField destObj destField offset scale
# where the coupling expression is anything a muParser can evaluate,
# using the input variable x. For example: 8e-5 + 300*x
# For now, let's use existing adaptors which take an offset and scale.
######################################################################
adaptorList = []
######################################################################
# Having defined everything, now to create the rdesigneur and proceed
# with creating the model.
######################################################################
rd.addSpineProto() # This adds a version with an LCa channel by default.
rdes = rd.rdesigneur(
useGssa = useGssa, \
combineSegments = combineSegments, \
stealCellFromLibrary = True, \
passiveDistrib = passiveDistrib, \
spineDistrib = spineDistrib, \
chanDistrib = chanDistrib, \
chemDistrib = chemDistrib, \
cellProto = cellProto, \
chanProto = chanProto, \
chemProto = chemProto, \
adaptorList = adaptorList
)
#spineProto = spineProto, \
return rdes
def buildPlots( rdes ):
graphs = moose.Neutral( '/graphs' )
vtab = moose.Table( '/graphs/VmTab' )
moose.connect( vtab, 'requestOut', rdes.soma, 'getVm' )
def displayPlots():
pylab.figure(1, figsize = (8,10 ) )
pylab.subplot( 1,1,1)
for i in moose.wildcardFind( "/graphs/#VmTab" ):
t = numpy.arange( 0, i.vector.size, 1 ) * i.dt
pylab.plot( t, i.vector, label = i.name )
pylab.xlabel( "Time (s)" )
pylab.legend()
pylab.title( 'Vm' )
pylab.figure(2, figsize= (8,10))
ax = pylab.subplot( 1,1,1 )
neuron = moose.element( '/model/elec' )
comptDistance = dict( zip( neuron.compartments, neuron.pathDistanceFromSoma ) )
for i in moose.wildcardFind( '/library/#[ISA=ChanBase]' ):
chans = moose.wildcardFind( '/model/elec/#/' + i.name )
print i.name, len( chans )
p = [ 1e6*comptDistance.get( j.parent, 0) for j in chans ]
Gbar = [ j.Gbar/(j.parent.length * j.parent.diameter * PI) for j in chans ]
if len( p ) > 2:
pylab.plot( p, Gbar, linestyle = 'None', marker = ".", label = i.name )
sortedGbar = sorted(zip(p, Gbar), key=lambda x: x[0])
ax.set_yscale( 'log' )
pylab.xlabel( "Distance from soma (microns)" )
pylab.ylabel( "Channel density (Seimens/sq mtr)" )
pylab.legend()
pylab.title( 'Channel distribution' )
pylab.show()
def create_vm_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.0,
1.0,
1.0),
moogli.colors.Color(1.0,
1.0,
0.0,
0.1)])
mapper = moogli.utilities.mapper(colormap, normalizer)
vms = [moose.element(x).Vm for x in network.shapes.keys()]
network.set("color", vms, mapper)
def prelude(view):
view.pitch(PI/2)
view.zoom(0.4)
def interlude(view):
moose.start(frameRunTime)
vms = [moose.element(x).Vm for x in network.shapes.keys()]
network.set("color", vms, mapper)
view.yaw(0.01)
currTime = moose.element('/clock').currentTime
if currTime < runtime:
deliverStim(currTime)
else:
view.stop()
def postlude(view):
displayPlots()
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(network.shapes.values())
view = moogli.View("vm-view",
prelude=prelude,
interlude=interlude,
postlude=postlude)
viewer.attach_view(view)
return viewer
def create_ca_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
ca_elements = []
for compartment_path in network.shapes.keys():
if moose.exists(compartment_path + '/Ca_conc'):
ca_elements.append(moose.element(compartment_path + '/Ca_conc'))
else:
ca_elements.append(moose.element('/library/Ca_conc'))
normalizer = moogli.utilities.normalizer(0.0,
0.002,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(1.0,
0.0,
0.0,
1.0),
moogli.colors.Color(0.0,
1.0,
1.0,
0.1)])
mapper = moogli.utilities.mapper(colormap, normalizer)
cas = [element.Ca for element in ca_elements]
network.set("color", cas, mapper)
def prelude(view):
view.pitch(PI/2)
view.zoom(0.4)
def interlude(view):
moose.start(frameRunTime)
cas = [element.Ca for element in ca_elements]
network.set("color", cas, mapper)
view.yaw(0.01)
currTime = moose.element('/clock').currentTime
if currTime < runtime:
deliverStim(currTime)
else:
view.stop()
viewer = moogli.Viewer("ca-viewer")
viewer.attach_shapes(network.shapes.values())
view = moogli.View("ca-view",
prelude=prelude,
interlude=interlude)
viewer.attach_view(view)
return viewer
def build3dDisplay(rdes):
print "building 3d Display"
app = QtGui.QApplication(sys.argv)
vm_viewer = create_vm_viewer(rdes)
vm_viewer.resize(700, 900)
vm_viewer.show()
vm_viewer.start()
ca_viewer = create_ca_viewer(rdes)
ca_viewer.resize(700, 900)
ca_viewer.show()
ca_viewer.start()
return app.exec_()
def deliverStim( currTime ):
if currTime > baselineTime and currTime < baselineTime + tetTime:
# deliver tet stim
step = int ( (currTime - baselineTime) / frameRunTime )
tetStep = int( 1.0 / (tetanusFrequency * frameRunTime ) )
if step % tetStep == 0:
for i in synDendList:
i.activation( tetanusAmplitude )
for i in synSpineList:
i.activation( tetanusAmplitudeForSpines )
else:
# deliver probe stim
step = int (currTime / frameRunTime )
probeStep = int( probeInterval / frameRunTime )
if step % probeStep == 0:
print "Doing probe Stim at ", currTime
for i in synSpineList:
i.activation( probeAmplitude )
def main():
global synSpineList
global synDendList
numpy.random.seed( 1234 )
rdes = buildRdesigneur()
rdes.buildModel( '/model' )
assert( moose.exists( '/model' ) )
synSpineList = moose.wildcardFind( "/model/elec/#head#/glu,/model/elec/#head#/NMDA" )
temp = set( moose.wildcardFind( "/model/elec/#/glu,/model/elec/#/NMDA" ) )
synDendList = list( temp - set( synSpineList ) )
print "num spine, dend syns = ", len( synSpineList ), len( synDendList )
moose.reinit()
#for i in moose.wildcardFind( '/model/elec/#apical#/#[ISA=CaConcBase]' ):
#print i.path, i.length, i.diameter, i.parent.length, i.parent.diameter
buildPlots(rdes)
# Run for baseline, tetanus, and post-tetanic settling time
t1 = time.time()
build3dDisplay(rdes)
print 'real time = ', time.time() - t1
if __name__ == '__main__':
main()
|
dilawar/moose-full
|
moose-examples/paper-2015/Fig2_elecModels/Fig2C.py
|
Python
|
gpl-2.0
| 13,821
|
[
"MOOSE",
"NEURON"
] |
bee14fb20884ebb11d735ae8cbf7e1c9694c504c226a33ea6a167e0aa6785163
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
AddressBookPage
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.plug.report import Bibliography
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import FULLCLEAR
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
class AddressBookPage(BasePage):
"""
Create one page for one Address
"""
def __init__(self, report, title, person_handle, has_add, has_res, has_url):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: person_handle -- the url, address and residence to use
for the report
@param: has_add -- the address to use for the report
@param: has_res -- the residence to use for the report
@param: has_url -- the url to use for the report
"""
person = report.database.get_person_from_handle(person_handle)
BasePage.__init__(self, report, title, person.gramps_id)
self.bibli = Bibliography()
self.uplink = True
# set the file name and open file
output_file, sio = self.report.create_file(person_handle, "addr")
result = self.write_header(_("Address Book"))
addressbookpage, dummy_head, dummy_body, outerwrapper = result
# begin address book page division and section title
with Html("div", class_="content",
id="AddressBookDetail") as addressbookdetail:
outerwrapper += addressbookdetail
link = self.new_person_link(person_handle, uplink=True,
person=person)
addressbookdetail += Html("h3", link)
# individual has an address
if has_add:
addressbookdetail += self.display_addr_list(has_add, None)
# individual has a residence
if has_res:
addressbookdetail.extend(
self.dump_residence(res)
for res in has_res
)
# individual has a url
if has_url:
addressbookdetail += self.display_url_list(has_url)
# add fullclear for proper styling
# and footer section to page
footer = self.write_footer(None)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(addressbookpage, output_file, sio, 0)
|
sam-m888/gramps
|
gramps/plugins/webreport/addressbook.py
|
Python
|
gpl-2.0
| 4,636
|
[
"Brian"
] |
80bd8b741ca194dbf9e58f52d72e67cd890e020cfccde586e68278e34b9932a6
|
# Rewrite of the original script; it worked, but not well enough...
# still ugly tho
CONFIGFILE = "config.txt"
import re
import json
import time
import requests
import datetime
import configparser
import imgurpython
# overwrite print() to only print ascii
import builtins
def asciify(text):
return ''.join([i if ord(i) < 128 else '?' for i in text])
def print(*args, **kwargs):
newargs = []
for text in args:
newargs.append(asciify(text))
builtins.print(*newargs, **kwargs)
class Client:
"""
Imgur API and config+authentication
"""
def __init__(self, config):
self.config = config
if not self.config.has_section("auth"):
self.config.modified = True
self.config["auth"] = {}
if ( not self.config.has_option("auth", "client_id")
and not self.config.has_option("auth", "client_secret")):
self.prompt_client_info()
self.connect()
self.account = self.client.get_account("me")
def prompt_client_info(self):
print("No client info found. If you haven't yet, visit")
print("https://api.imgur.com/oauth2/addclient and register an application.")
print("Pick 'OAuth 2 authorization without a callback URL'.")
print("If you have already registered an application, visit")
print("https://imgur.com/account/settings/apps and generate a new secret.")
print("Then, fill in the client id and secret below.")
self.config["auth"]["client_id"] = input("Client ID: ").strip()
self.config["auth"]["client_secret"] = input("Client Secret: ").strip()
self.config.modified = True
print("")
def prompt_pin(self):
"""
prompt_pin() -> pin
Assumes that there is already a client connected to Imgur.
"""
authorization_url = self.client.get_auth_url("pin")
print("Please visit {}".format(authorization_url))
print("and enter the PIN code displayed on the site.")
return input("PIN code: ").strip()
def connect(self):
"""
Creates and connects self.client.
"""
if self.config.has_option("auth", "refresh_token"):
self.client = imgurpython.ImgurClient(self.config["auth"]["client_id"],
self.config["auth"]["client_secret"],
refresh_token=self.config["auth"]["refresh_token"])
else:
self.client = imgurpython.ImgurClient(self.config["auth"]["client_id"],
self.config["auth"]["client_secret"])
credentials = self.client.authorize(self.prompt_pin(), "pin")
self.config["auth"]["refresh_token"] = credentials["refresh_token"]
self.config.modified = True
self.client.set_user_auth(credentials["access_token"], credentials["refresh_token"])
class Subscribers:
"""
Manages subscribers and subscribing/unsubscribing
"""
subregex = re.compile(r"^<?subscribe>?.?$",
flags=re.IGNORECASE)
unsubregex = re.compile(r"^<?unsubscribe>?\.?$",
flags=re.IGNORECASE)
askregex = re.compile(r"subscri|\bsign.*\b(up|in|on)\b|\b(join|tag|includ)|<.*>|\bdot|\b(leav|cancel)$",
flags=re.IGNORECASE)
def __init__(self, subsfile):
self.subsfile = subsfile
self.subs = {}
self.modified = False
self.load()
def load(self):
try:
with open(self.subsfile) as f:
for line in f:
self.load_line(line)
except FileNotFoundError:
print("File not found: {}".format(repr(self.subsfile)))
print("If you already have a subscribers file, you can set it in the config file.")
print("A new file will be created.")
def load_line(self, line):
if line[0] == "#":
return
parts = line[:-1].split(" ")
parts = [item for item in parts if item] # remove empty strings
if not parts:
return
status = parts[0]
nick = parts[1].lower()
datetime = int(parts[2])
self.subs[nick] = {"status": status, "dt": datetime}
def save(self):
with open(self.subsfile, "w") as f:
for sub, info in sorted(self.subs.items()):
f.write("{} {} {}\n".format(info["status"], sub, info["dt"]))
def add(self, nick, datetime=None):
print("Adding {}.".format(nick))
nick = nick.lower()
if nick in self.subs:
self.subs[nick] = {"status": "s", "dt": max(datetime or 0, self.subs[nick]["dt"])}
else:
self.subs[nick] = {"status": "s", "dt": datetime or 0}
self.modified = True
def remove(self, nick, datetime=None):
print("Removing {}.".format(nick))
nick = nick.lower()
if nick in self.subs:
self.subs[nick] = {"status": "u", "dt": max(datetime or 0, self.subs[nick]["dt"])}
else:
self.subs[nick] = {"status": "u", "dt": datetime or 0}
self.modified = True
def subscribed(self):
return {sub: info for sub, info in self.subs.items() if info["status"] == "s"}
def clean_up(self):
self.subs = self.subscribed()
self.modified = True
def count(self):
return len(self.subscribed())
def to_comments(self):
comments = []
comment = ""
for sub in self.subscribed():
sub = "@" + sub
if comment:
if len(comment) + len(sub) + 1 <= 140: #character limit
comment += " " + sub
continue
else:
comments.append(comment)
comment = sub
if comment:
comments.append(comment)
return comments
def check_comment(self, nick, comment, datetime):
"""
Returns True when comment is to be added to the ignore list.
"""
nick = nick.lower()
if nick in self.subs and self.subs[nick]["dt"] >= datetime:
return
if self.subregex.search(comment):
self.add(nick, datetime=datetime)
elif self.unsubregex.search(comment):
self.remove(nick, datetime=datetime)
elif self.askregex.search(comment):
action = self.ask_user_about_comment(comment)
if action == "add":
self.add(nick, datetime=datetime)
elif action == "remove":
self.remove(nick, datetime=datetime)
else:
return True
def ask_user_about_comment(self, comment):
print("\nWhat is the following comment?")
print(comment)
print("[s] subscribe | [d] unsubscribe | [anything else] neither")
action = input("[s/d/f] ").strip().lower()
print("")
if action == "s":
return "add"
elif action == "d":
return "remove"
class Albums:
"""
Manages added albums and keeps track of comments with uninteresting content
"""
def __init__(self, albumsfile):
self.albumsfile = albumsfile
self.albums = {}
self.modified = False
self.load()
def load(self):
try:
with open(self.albumsfile) as f:
for line in f:
self.load_line(line)
except FileNotFoundError:
print("File not found: {}".format(repr(self.albumsfile)))
print("If you already have an albums file, you can set it in the config file.")
print("A new file will be created.")
def load_line(self, line):
if line[0] == "#":
return
parts = line[:-1].split(" ", 1)
if len(parts) < 2:
return
album = parts[0]
comments = json.loads(parts[1])
if album in self.albums:
for comment in comments:
if not comment in self.albums[album]:
self.albums[album].append(comment)
else:
self.albums[album] = comments
def save(self):
with open(self.albumsfile, "w") as f:
for album, comments in sorted(self.albums.items()):
f.write("{} {}\n".format(album, json.dumps(comments)))
def add(self, album):
print ("Adding album {}".format(album))
if not album in self.albums:
self.albums[album] = []
self.modified = True
def remove(self, album):
print ("Removing album {}".format(album))
if album in self.albums:
del self.albums[album]
self.modified = True
def add_comment(self, album, comment):
print ("Adding comment {} to album {} ignore list".format(comment, album))
if not comment in self.albums[album]:
self.albums[album].append(comment)
self.modified = True
def in_album(self, album, comment):
return comment in self.albums[album]
class ITBot:
"""
Manage the input and resources
"""
def __init__(self, configfile="config.txt"):
"""
Load the config and connect to imgur.
"""
self.configfile = configfile
self.config = configparser.ConfigParser()
self.config.read(self.configfile)
self.config.modified = False
if not self.config.has_section("misc"):
self.config["misc"] = {}
self.config.modified = True
if not self.config.has_option("misc", "delay"):
self.config["misc"]["delay"] = "10"
self.config.modified = True
if not self.config.has_option("misc", "retry_delay"):
self.config["misc"]["retry_delay"] = "60"
self.config.modified = True
if not self.config.has_option("misc", "branches_per_node"):
self.config["misc"]["branches_per_node"] = "10"
self.config.modified = True
if not self.config.has_option("misc", "subsfile"):
self.config["misc"]["subsfile"] = "subscribers.txt"
self.config.modified = True
if not self.config.has_option("misc", "albumsfile"):
self.config["misc"]["albumsfile"] = "albums.txt"
self.config.modified = True
self.client = Client(self.config)
self.subs = Subscribers(self.config["misc"]["subsfile"])
self.albums = Albums(self.config["misc"]["albumsfile"])
self._commands = {}
self._add_command("quit", self.command_quit, "Quit.",
("It's just quitting. Why would you call help on that?\n"
"Ctrl+D (EOF) or Ctrl+C (KeyboardInterrupt) work too."))
self._add_command("q", self.command_quit, "Short for 'quit'.",
("You seem desparate... There really is nothing new here."))
self._add_command("help", self.command_help, "Show th- Oh, you already figured it out...",
("I believe there is nothing more I could tell you about this command.\n"
"Go and try out the other commands instead of doing - well, this :P"))
self._add_command("comment", self.command_comment, "Comment on an image with all your subs.",
("comment <image_id>\n"
"Posts a top-level comment and then replies with the full list of your subs."))
self._add_command("scan", self.command_scan, "Scan your albums' comments for (un)subscribers.",
("Scans through the comments below your albums and processes any obvious '(un)subscribe's.\n"
"In difficult cases, presents the comment to you and lets you decide."))
self._add_command("add", self.command_add, "Add subscribers.",
("add <nick> [<nick> [...]]\n"
"List all the nicks after the command and they'll be added to your\n"
"subs in the subscribers file."))
self._add_command("remove", self.command_remove, "Remove subscribers.",
("remove <nick> [<nick> [...]]\n"
"Works the same way as add, but in reverse :P"))
self._add_command("reg", self.command_reg, "Register albums.",
("reg <album_id> [<album_id> [...]]\n"
"Register albums to be scanned by the scan command."))
self._add_command("dereg", self.command_dereg, "Deregister albums.",
("dereg <album_id> [<album_id> [...]]\n"
"The albums will no longer be included in further calls to the scan command.\n"
"WARNING: This also deletes all info about messages from those albums which were\n"
"marked as \"ignore\" (neither a subscribe nor an unsubscribe)."))
self._add_command("count", self.command_count, "Boost ego.",
("Lean back and relax"))
self._add_command("cleanup", self.command_count, "Removes all unsubscribed nicks from the subsfile.",
("Don't do this unless your subsfile is too large.\n"
"Normally, it is not necessary to clean up at all."))
def _add_command(self, command, function, shorthelp, longhelp):
"""
Helps organising commands
"""
self._commands[command] = {
"function": function,
"shorthelp": shorthelp,
"longhelp": longhelp
}
def fancy_intro(self):
"""
Nothing important...
"""
logo = [" ___________________",
" .' '.",
" / _ \\",
"| (_)_ __ __ _ _ _ _ _ |",
"| | | ' \/ _` | || | '_| |",
"| |_|_|_|_\__, |\_,_|_| |",
" \\ |___/ /",
" '.___________________.'"]
for line in logo:
print(line)
time.sleep(0.1)
def fancy_outtro(self):
"""
Nothing important...
"""
logo = [" ________________",
" .' '.",
" / ____ _ \\",
"| | __ ) _ _ ___| | |",
"| | _ \| | | |/ _ \ | |",
"| | |_) | |_| | __/_| |",
"| |____/ \__, |\___(_) |",
" \\ |___/ /",
" '.________________.'"]
for line in logo:
print(line)
time.sleep(0.1)
def command_help(self, args):
if args:
if args[0] in self._commands:
print(self._commands[args[0]]["longhelp"])
else:
print("No help found for {}. You might want to check 'help'.".format(args[0]))
else:
print("Use 'help <command>' for a more detailed help text.\n")
for command, info in sorted(self._commands.items()):
print(" {} - {}".format(command.ljust(10), info["shorthelp"]))
def command_quit(self, args):
return True
def command_add(self, args):
if not args:
print("No names found, check the 'help subadd' or just enter some names...")
return
for arg in args:
self.subs.add(arg)
def command_remove(self, args):
if not args:
print("No names found, check the 'help subremove' or just enter some names...")
return
for arg in args:
self.subs.remove(arg)
def command_count(self, args):
print("You currently have {} subscribers.".format(self.subs.count()))
print("\\(^o^)/")
def command_comment(self, args):
try:
image_id = args[0]
except IndexError:
print("Image ID missing. Maybe check the 'help comment'?")
return
comments = self.subs.to_comments()
print("{} subscribers in {} comments.".format(self.subs.count(), len(comments)))
top_comment = input("Top-level comment: ").strip()
if not top_comment:
print("Comment can't be empty.")
return
if len(top_comment) > 140:
print("Too many characters (>140), aborting!")
return
print("\nYou entered the following:")
print("Image ID:", repr(image_id))
print("Top-level comment:", repr(top_comment))
if input("Do you want to continue? [Y/n] ").lower() != "y":
return
# use tree of comments to lower the lag on mobile
comment_count = len(comments)
print("\nBuilding tree")
tree = self.build_comment_tree(comments)
print("Posting top-level comment")
root_comment = self.client.client.post_comment(image_id, top_comment)
print("Posting rest of comments")
print("This may take a few hours.")
print("The number of branches per node can be adjusted in the config file.")
self.post_comment_tree(image_id, tree, root_comment["id"], comment_count)
# old comment posting code
"""
print("\nPosting top-level comment")
root_comment = self.client.client.post_comment(image_id, top_comment)
for index, comment in enumerate(comments):
print("Posting comment {} of {}".format(index+1, len(comments)))
while(True):
time.sleep(self.config.getint("misc", "delay"))
try:
self.client.client.post_comment_reply(root_comment["id"], image_id, comment)
except imgurpython.helpers.error.ImgurClientError:
print("An error occurred while sending this comment. Retrying...")
except imgurpython.helpers.error.ImgurClientRateLimitError:
print("Rate limit hit. Retrying...")
except requests.exceptions.ConnectionError:
delay = self.config.getint("misc", "retry_delay")
print("Connection problems, retrying in {}s...".format(delay))
time.sleep(delay)
else:
break
"""
def traverse_level(self, tree, level):
if level == 0:
yield from tree.values()
else:
for _, branch in tree.items():
yield from self.traverse_level(branch, level - 1)
def build_comment_tree(self, comments):
tree = {"root":{}}
level = 0
while True:
for branch in self.traverse_level(tree, level):
for i in range(self.config.getint("misc", "branches_per_node")):
if comments:
branch[comments.pop()] = {}
else:
return tree["root"]
level += 1
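    # Shape illustration (added note): with branches_per_node = 2 and comments
    # ["c1", ..., "c6"], build_comment_tree returns roughly
    #   {"c6": {"c4": {}, "c3": {}}, "c5": {"c2": {}, "c1": {}}}
    # i.e. each posted comment receives only a few direct replies, which keeps
    # reply chains shallow and reduces lag on mobile clients.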
def post_comment_tree(self, image_id, tree, root_comment_id, comment_count):
for comment, branch in tree.items():
time.sleep(self.config.getint("misc", "delay"))
while(True):
try:
comment_id = self.client.client.post_comment_reply(root_comment_id, image_id, comment)["id"]
except imgurpython.helpers.error.ImgurClientError as e:
print("An error occurred while sending this comment ({}: {}). Retrying...".format(e.status_code, e.error_message))
except imgurpython.helpers.error.ImgurClientRateLimitError:
print("Rate limit hit. Retrying...")
except requests.exceptions.ConnectionError:
print("Connection problems. Retrying...")
else:
time_per_comment = self.config.getint("misc", "delay") + 1
delta = datetime.timedelta(seconds=time_per_comment*comment_count)
print("{} comments left; estimated time: {}".format(comment_count, delta))
comment_count -= 1
break
time.sleep(self.config.getint("misc", "retry_delay")) # something went wrong, so we wait...
comment_count = self.post_comment_tree(image_id, branch, comment_id, comment_count)
return comment_count
def command_scan(self, args):
for album in self.albums.albums:
print("Scanning album {}...".format(album))
try:
comments = self.client.client.gallery_item_comments(album, sort="new")
except imgurpython.helpers.error.ImgurClientError:
print("Error while loading comments. You might want to double-check your albums file.")
else:
for comment in self.flatten_comments(comments):
if comment.author_id != self.client.account.id \
and not self.albums.in_album(album, comment.id) \
and self.subs.check_comment(comment.author, comment.comment, comment.datetime):
self.albums.add_comment(album, comment.id)
def command_reg(self, args):
if not args:
print("Album IDs missing. Maybe check the 'help reg'?")
for album in args:
self.albums.add(album)
def command_dereg(self, args):
if not args:
print("Album IDs missing. Maybe check the 'help dereg'?")
for album in args:
self.albums.remove(album)
def flatten_comments(self, comments):
for comment in comments:
yield comment
if comment.children:
yield from self.flatten_comments(comment.children)
def parse_command(self, inputstr):
"""
parse_command(inputstring) -> command, [args]
In case command parsing will need to be improved in the future.
"""
args = inputstr.split(" ")
args = [arg for arg in args if arg] # remove empty strings
if not args: # no command found
return "", []
command = args[0]
args = args[1:]
return command, args
def prompt_command(self):
"""
prompt_command() -> exit
Takes a command and calls the respective functions.
Returns True if user exited.
"""
inputstr = input("\n>>> ")
command, args = self.parse_command(inputstr)
if not command:
return
if command in self._commands:
return self._commands[command]["function"](args)
else:
print("Invalid command. Type 'help' for a list of available commands.")
def interactive(self):
"""
Start the interactive mode (entering commands)
"""
self.fancy_intro()
print("\nWelcome to TITsBot v.2 *dial-up noises in background*")
print("('help' for a list of commands)")
try:
while(True):
if self.prompt_command():
break
except (EOFError, KeyboardInterrupt):
print("")
if self.config.modified:
print("Saving config.")
with open(self.configfile, "w") as f:
self.config.write(f)
if self.subs.modified:
print("Saving subs.")
self.subs.save()
if self.albums.modified:
print("Saving albums.")
self.albums.save()
self.fancy_outtro()
print("\nGoodbye! *beeping noise, then bluescreen*")
if __name__ == "__main__":
bot = ITBot(CONFIGFILE)
bot.interactive()
|
Garmelon/itbot
|
script.py
|
Python
|
mit
| 20,301
|
[
"VisIt"
] |
ca48569c064d0cedd61471798d9f51951cfd8efddae2492a95c7a1373e5a5e15
|
from io import BytesIO
import numpy as np
import warnings
from .. import Variable
from ..conventions import cf_encoder
from ..core.pycompat import iteritems, basestring, OrderedDict
from ..core.utils import Frozen, FrozenOrderedDict
from ..core.indexing import NumpyIndexingAdapter
from .common import WritableCFDataStore
from .netcdf3 import (is_valid_nc3_name, encode_nc3_attr_value,
encode_nc3_variable)
def _decode_string(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
return s
def _decode_attrs(d):
# don't decode _FillValue from bytes -> unicode, because we want to ensure
# that its type matches the data exactly
return OrderedDict((k, v if k == '_FillValue' else _decode_string(v))
for (k, v) in iteritems(d))
class ScipyArrayWrapper(NumpyIndexingAdapter):
def __init__(self, netcdf_file, variable_name):
self.netcdf_file = netcdf_file
self.variable_name = variable_name
@property
def array(self):
# We can't store the actual netcdf_variable object or its data array,
# because otherwise scipy complains about variables or files still
# referencing mmapped arrays when we try to close datasets without
# having read all data in the file.
return self.netcdf_file.variables[self.variable_name].data
@property
def dtype(self):
# always use native endianness
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
data = super(ScipyArrayWrapper, self).__getitem__(key)
# Copy data if the source file is mmapped. This makes things consistent
# with the netCDF4 library by ensuring we can safely read arrays even
# after closing associated files.
copy = self.netcdf_file.use_mmap
data = np.array(data, dtype=self.dtype, copy=copy)
return data
class ScipyDataStore(WritableCFDataStore):
"""Store for reading and writing data via scipy.io.netcdf.
This store has the advantage of being able to be initialized with a
    StringIO object, allowing for serialization without writing to disk.
It only supports the NetCDF3 file-format.
"""
def __init__(self, filename_or_obj, mode='r', format=None, group=None,
writer=None, mmap=None):
import scipy
import scipy.io
if mode != 'r' and scipy.__version__ < '0.13': # pragma: no cover
warnings.warn('scipy %s detected; '
'the minimal recommended version is 0.13. '
                          'Older versions of this library do not reliably '
'read and write files.'
% scipy.__version__, ImportWarning)
if group is not None:
raise ValueError('cannot save to a group with the '
'scipy.io.netcdf backend')
if format is None or format == 'NETCDF3_64BIT':
version = 2
elif format == 'NETCDF3_CLASSIC':
version = 1
else:
raise ValueError('invalid format for scipy.io.netcdf backend: %r'
% format)
# if filename is a NetCDF3 bytestring we store it in a StringIO
if (isinstance(filename_or_obj, basestring) and
filename_or_obj.startswith('CDF')):
# TODO: this check has the unfortunate side-effect that
# paths to files cannot start with 'CDF'.
filename_or_obj = BytesIO(filename_or_obj)
self.ds = scipy.io.netcdf_file(
filename_or_obj, mode=mode, mmap=mmap, version=version)
super(ScipyDataStore, self).__init__(writer)
def open_store_variable(self, name, var):
return Variable(var.dimensions, ScipyArrayWrapper(self.ds, name),
_decode_attrs(var._attributes))
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
return Frozen(_decode_attrs(self.ds._attributes))
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def set_dimension(self, name, length):
if name in self.dimensions:
raise ValueError('%s does not support modifying dimensions'
% type(self).__name__)
self.ds.createDimension(name, length)
def _validate_attr_key(self, key):
if not is_valid_nc3_name(key):
raise ValueError("Not a valid attribute name")
def set_attribute(self, key, value):
self._validate_attr_key(key)
value = encode_nc3_attr_value(value)
setattr(self.ds, key, value)
def prepare_variable(self, name, variable, check_encoding=False):
variable = encode_nc3_variable(variable)
if check_encoding and variable.encoding:
raise ValueError('unexpected encoding for scipy backend: %r'
% list(variable.encoding))
self.set_necessary_dimensions(variable)
data = variable.data
        # nb. this still creates a numpy array fully in memory, even though we
        # don't write the data yet; scipy.io.netcdf does not support
        # incremental writes.
self.ds.createVariable(name, data.dtype, variable.dims)
scipy_var = self.ds.variables[name]
for k, v in iteritems(variable.attrs):
self._validate_attr_key(k)
setattr(scipy_var, k, v)
return scipy_var, data
def sync(self):
super(ScipyDataStore, self).sync()
self.ds.flush()
def close(self):
self.ds.close()
def __exit__(self, type, value, tb):
self.close()
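# Usage sketch (added illustration; the buffer handling is an assumption based
# on the class docstring and __init__ above): wrapping an in-memory buffer is
# what enables serialization without touching disk, e.g.
#   from io import BytesIO
#   store = ScipyDataStore(BytesIO(), mode='w', format='NETCDF3_CLASSIC')
# while reading back passes the raw NetCDF3 bytestring (which starts with
# 'CDF') or a BytesIO around it, with mode='r'.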
|
petercable/xray
|
xray/backends/scipy_.py
|
Python
|
apache-2.0
| 5,813
|
[
"NetCDF"
] |
f7340620e52c36bca04afabbb4f196698d8414cc961148c388b993450879f35c
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for Video.
"""
import json
import requests
from .helpers import UniqueCourseTest
from ..pages.lms.video import VideoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.course_info import CourseInfoPage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from box.test.flaky import flaky
VIDEO_SOURCE_PORT = 8777
YOUTUBE_STUB_PORT = 9080
YOUTUBE_STUB_URL = 'http://127.0.0.1:{}/'.format(YOUTUBE_STUB_PORT)
HTML5_SOURCES = [
'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]
HTML5_SOURCES_INCORRECT = [
'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
class YouTubeConfigError(Exception):
"""
Error occurred while configuring YouTube Stub Server.
"""
pass
@flaky
class VideoBaseTest(UniqueCourseTest):
"""
Base class for tests of the Video Player
Sets up the course and provides helper functions for the Video tests.
"""
def setUp(self):
"""
Initialization of pages and course fixture for video tests
"""
super(VideoBaseTest, self).setUp()
self.video = VideoPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.metadata = None
self.assets = []
self.verticals = None
self.youtube_configuration = {}
# reset youtube stub server
self.addCleanup(self._reset_youtube_stub_server)
def navigate_to_video(self):
""" Prepare the course and get to the video and render it """
self._install_course_fixture()
self._navigate_to_courseware_video_and_render()
def navigate_to_video_no_render(self):
"""
Prepare the course and get to the video unit
however do not wait for it to render, because
the has been an error.
"""
self._install_course_fixture()
self._navigate_to_courseware_video_no_render()
def _install_course_fixture(self):
""" Install the course fixture that has been defined """
if self.assets:
self.course_fixture.add_asset(self.assets)
chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
chapter_sequential.add_children(*self._add_course_verticals())
chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
self.course_fixture.add_children(chapter)
self.course_fixture.install()
if len(self.youtube_configuration) > 0:
self._configure_youtube_stub_server(self.youtube_configuration)
def _add_course_verticals(self):
"""
Create XBlockFixtureDesc verticals
:return: a list of XBlockFixtureDesc
"""
xblock_verticals = []
_verticals = self.verticals
# Video tests require at least one vertical with a single video.
if not _verticals:
_verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]
for vertical_index, vertical in enumerate(_verticals):
xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))
return xblock_verticals
def _create_single_vertical(self, vertical, vertical_index):
"""
Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
A single course vertical can contain single or multiple video modules.
:param vertical: vertical data list
:param vertical_index: index for the vertical display name
:return: XBlockFixtureDesc
"""
xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index))
for video in vertical:
xblock_course_vertical.add_children(
XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))
return xblock_course_vertical
def _navigate_to_courseware_video(self):
""" Register for the course and navigate to the video unit """
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
def _navigate_to_courseware_video_and_render(self):
""" Wait for the video player to render """
self._navigate_to_courseware_video()
self.video.wait_for_video_player_render()
def _navigate_to_courseware_video_no_render(self):
""" Wait for the video Xmodule but not for rendering """
self._navigate_to_courseware_video()
self.video.wait_for_video_class()
def _configure_youtube_stub_server(self, config):
"""
Allow callers to configure the stub server using the /set_config URL.
:param config: Configuration dictionary.
The request should have PUT data, such that:
Each PUT parameter is the configuration key.
Each PUT value is a JSON-encoded string value for the configuration.
:raise YouTubeConfigError:
"""
youtube_stub_config_url = YOUTUBE_STUB_URL + 'set_config'
config_data = {param: json.dumps(value) for param, value in config.items()}
response = requests.put(youtube_stub_config_url, data=config_data)
if not response.ok:
raise YouTubeConfigError(
'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
youtube_stub_config_url, config, response.status_code))
def _reset_youtube_stub_server(self):
"""
Reset YouTube Stub Server Configurations using the /del_config URL.
:raise YouTubeConfigError:
"""
youtube_stub_config_url = YOUTUBE_STUB_URL + 'del_config'
response = requests.delete(youtube_stub_config_url)
if not response.ok:
raise YouTubeConfigError(
'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
youtube_stub_config_url, response.status_code))
def metadata_for_mode(self, player_mode, additional_data=None):
"""
Create a dictionary for video player configuration according to `player_mode`
:param player_mode (str): Video player mode
:param additional_data (dict): Optional additional metadata.
:return: dict
"""
metadata = {}
if player_mode == 'html5':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
metadata.update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES_INCORRECT
})
if additional_data:
metadata.update(additional_data)
return metadata
def open_video(self, video_display_name):
"""
Navigate to sequential specified by `video_display_name`
:param video_display_name (str): Sequential Title
"""
self.course_nav.go_to_sequential(video_display_name)
self.video.wait_for_video_player_render()
class YouTubeVideoTest(VideoBaseTest):
""" Test YouTube Video Player """
def setUp(self):
super(YouTubeVideoTest, self).setUp()
def test_youtube_video_rendering_wo_html5_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
"""
self.navigate_to_video()
# Verify that video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
self.video.show_captions()
# Verify that we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_cc_button_transcripts_and_sub_fields_empty(self):
"""
Scenario: CC button works correctly if transcripts and sub fields are empty,
but transcript file exists in assets (Youtube mode of Video component)
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
Then I see the correct english text in the captions
"""
self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
self.navigate_to_video()
self.video.show_captions()
# Verify that we see "Hi, welcome to Edx." text in the captions
self.assertIn('Hi, welcome to Edx.', self.video.captions_text)
def test_cc_button_hidden_no_translations(self):
"""
Scenario: CC button is hidden if no translations
Given the course has a Video component in "Youtube" mode
Then the "CC" button is hidden
"""
self.navigate_to_video()
self.assertFalse(self.video.is_button_shown('CC'))
def test_fullscreen_video_alignment_with_transcript_hidden(self):
"""
Scenario: Video is aligned with transcript hidden in fullscreen mode
Given the course has a Video component in "Youtube" mode
When I view the video at fullscreen
Then the video with the transcript hidden is aligned correctly
"""
self.navigate_to_video()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in YouTube mode
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I can download the transcript in "srt" format
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_OEoXaMPEzfM.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Hi, welcome to Edx." text in the captions
self.assertIn('Hi, welcome to Edx.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Hi, welcome to Edx."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Hi, welcome to Edx.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_fullscreen_video_alignment_on_transcript_toggle(self):
"""
Scenario: Video is aligned correctly on transcript toggle in fullscreen mode
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
And the video with the transcript hidden is aligned correctly
"""
self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
data = {'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
# click video button "CC"
self.video.click_player_button('CC')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_video_rendering_with_default_response_time(self):
"""
Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly
Given the YouTube server response time less than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "Youtube" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 0.4
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_video_rendering_wo_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly
Given the YouTube server response time is greater than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 2.0
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
def test_video_with_youtube_blocked(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube server response time is greater than 1.5 seconds
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
"""
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
def test_download_transcript_button_works_correctly(self):
"""
Scenario: Download Transcript button works correctly
Given the course has Video components A and B in "Youtube" mode
And Video component C in "HTML5" mode
And I have defined downloadable transcripts for the videos
Then I can download a transcript for Video A in "srt" format
And I can download a transcript for Video A in "txt" format
And I can download a transcript for Video B in "txt" format
And the Download Transcript menu does not exist for Video C
"""
data_a = {'sub': 'OEoXaMPEzfM', 'download_track': True}
youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
self.assets.append('subs_b7xgknqkQk8.srt.sjson')
data_c = {'track': 'http://example.org/', 'download_track': True}
html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)
self.verticals = [
[{'display_name': 'A', 'metadata': youtube_a_metadata}],
[{'display_name': 'B', 'metadata': youtube_b_metadata}],
[{'display_name': 'C', 'metadata': html5_c_metadata}]
]
# open the section with videos (open video "A")
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "00:00:00,270"
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', '00:00:00,270'))
# select the transcript format "txt"
self.assertTrue(self.video.select_transcript_format('txt'))
# check if we can download transcript in "txt" format that has text "Hi, welcome to Edx."
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Hi, welcome to Edx.'))
# open video "B"
self.course_nav.go_to_sequential('B')
# check if we can download transcript in "txt" format that has text "Equal transcripts"
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Equal transcripts'))
# open video "C"
self.course_nav.go_to_sequential('C')
# menu "download_transcript" doesn't exist
self.assertFalse(self.video.is_menu_exist('download_transcript'))
def test_video_language_menu_working(self):
"""
Scenario: Language menu works correctly in Video component
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "zh"
Then I see "好 各位同学" text in the captions
And I select language with code "en"
Then I see "Hi, welcome to Edx." text in the captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_OEoXaMPEzfM.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.hide_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages(), correct_languages)
self.video.select_language('zh')
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.video.select_language('en')
self.assertIn('Hi, welcome to Edx.', self.video.captions_text)
class YouTubeHtml5VideoTest(VideoBaseTest):
""" Test YouTube HTML5 Video Player """
def setUp(self):
super(YouTubeHtml5VideoTest, self).setUp()
def test_youtube_video_rendering_with_unsupported_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode
        with HTML5 sources that aren't supported by the browser
Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
Then the video has rendered in "Youtube" mode
"""
self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
self.navigate_to_video()
# Verify that the video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
class Html5VideoTest(VideoBaseTest):
""" Test HTML5 Video Player """
def setUp(self):
super(Html5VideoTest, self).setUp()
def test_autoplay_disabled_for_video_component(self):
"""
Scenario: Autoplay is disabled by default for a Video component
Given the course has a Video component in "HTML5" mode
When I view the Video component
Then it does not have autoplay enabled
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
# Verify that the video has autoplay mode disabled
self.assertFalse(self.video.is_autoplay_enabled)
def test_html5_video_rendering_with_unsupported_sources(self):
"""
        Scenario: LMS displays an error message for HTML5 sources that are not supported by the browser
        Given the course has a Video component in "HTML5_Unsupported_Video" mode
        When I view the Video component
        Then an error message is shown
And the error message has the correct text
"""
self.metadata = self.metadata_for_mode('html5_unsupported_video')
self.navigate_to_video_no_render()
# Verify that error message is shown
self.assertTrue(self.video.is_error_message_shown)
# Verify that error message has correct text
correct_error_message_text = 'No playable video sources found.'
self.assertIn(correct_error_message_text, self.video.error_message_text)
# Verify that spinner is not shown
self.assertFalse(self.video.is_spinner_shown())
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_OEoXaMPEzfM.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Hi, welcome to Edx." text in the captions
self.assertIn('Hi, welcome to Edx.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Hi, welcome to Edx."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Hi, welcome to Edx.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
#Then I can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_full_screen_video_alignment_with_transcript_visible(self):
"""
Scenario: Video is aligned correctly with transcript enabled in fullscreen mode
Given the course has a Video component in "HTML5" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I show the captions
And I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
"""
self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
data = {'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
def test_cc_button_with_english_transcript(self):
"""
Scenario: CC button works correctly with only english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined english subtitles for the video
And I have uploaded an english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
data = {'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "Hi, welcome to Edx." text in the captions
self.assertIn("Hi, welcome to Edx.", self.video.captions_text)
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('chinese_transcripts.srt')
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_video_rendering(self):
"""
Scenario: Video component is fully rendered in the LMS in HTML5 mode
Given the course has a Video component in "HTML5" mode
Then the video has rendered in "HTML5" mode
And video sources are correct
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources()]))
|
nanolearning/edx-platform
|
common/test/acceptance/tests/test_video_module.py
|
Python
|
agpl-3.0
| 29,937
|
[
"VisIt"
] |
c7c023e5f1530e39d8dcb2811d55194d53697a970e2ae75f402ef0728789d738
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import sys
import parser
import symbol
import token
import ast
import inspect
from ..import meta
class expression_validator(ast.NodeVisitor):
"""
This tree walk attempts to validate an expression: that the expression
should *not* contain certain names.
This is used for the case
x = 10
lambda x: fn(x+15, x)
Really, the "x+15" expression is invalid since the expression uses an
lambda argument. However, it does evaluate correctly in the scope
since "x" also exists in the function scope.
We thus need to validate the expression before attempting to evaluate it
so that the expression must not contain a lambda argument.
This validator here is a lot stricter than it should since it will also
prevent all cases where something with the same name as the lambda argument
is created in an inner scope. For instance:
lambda x: fn((lambda x: x + 15)(5), x)
    lambda x: fn([x for x in [1,2,3]], x)
"""
def __init__(self, blocked_symbols):
self.blocked_symbols = blocked_symbols
def visit_Name(self, node):
if node.id in self.blocked_symbols:
raise RuntimeError("Blocked symbols encountered")
class attribute_reader(ast.NodeVisitor):
"""
Things like gl.extensions._demo_add
get parsed as
Attribute(value=Attribute(value=Name(id='gl', ctx=Load()),
attr='extensions', ctx=Load()), attr='_demo_add', ctx=Load())
This causes problems for
lambda x: gl.extensions._demo_add(x, 5)
We need to breakdown the attribute into the original string
"""
def default(self, node):
raise NotImplementedError("Cannot process token at " +
str(node.lineno) + ":" + str(node.col_offset))
def visit_Name(self, node):
return node.id
def visit_Attribute(self, node):
s = self.visit(node.value)
return s + "." + node.attr
class Parameter(object):
def __init__(self, name):
self.name = name
def __str__(self):
return 'λ' + self.name
def __repr__(self):
return str(self)
class lambda_closure_visitor(ast.NodeVisitor):
"""
This implements a *very* limited decompiler. It only handles cases of
lambda x: fn(a, b, x, ...)
    where a, b, etc. are variables captured from the surrounding scope, and there
    may be some occurrences of x.
No additional statements or expressions are permitted
"""
FUNCTION = 0 # I am translating the wrapping lambda function
INNER_CALL = 1 # I am translating the function call inside
PARAMETER = 2 # I am just translating a function parameter
def __init__(self):
# The fn
self.closure_fn_name = ""
# A list of captured positional arguments
# lambda parameters are denoted by being of type Parameter
self.positional_args = []
# A dictionary of captured named arguments
# lambda parameters are denoted by being of type Parameter
self.named_args = {}
# List of all the input argument names
self.input_arg_names = []
self.caller_globals = []
self.state = self.FUNCTION
def default(self, node):
raise NotImplementedError("Cannot process token at " +
str(node.lineno) + ":" + str(node.col_offset))
def __repr__(self):
return str(self)
def __str__(self):
ret = self.closure_fn_name + "("
comma = False
for i in self.positional_args:
if comma:
ret = ret + ','
ret = ret + str(i)
comma = True
for i in self.named_args:
if comma:
ret = ret + ','
ret = ret + i + ":" + str(self.named_args[i])
comma = True
ret = ret + ")"
return ret
def translate_ast(self, ast_node):
#print(ast.dump(ast_node))
t = self.visit(ast_node)
def visit_Module(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected module in position " +
str(node.lineno) + ":" + str(node.col_offset))
for line in node.body:
self.visit(line)
def visit_Call(self, node):
if (self.state != self.INNER_CALL):
raise NotImplementedError("Unexpected call in position " +
str(node.lineno) + ":" + str(node.col_offset))
self.state = self.INNER_CALL
# this is the main closure function call
if self.closure_fn_name != "":
raise NotImplementedError("Cannot translate function call " +
str(node.lineno) + ":" + str(node.col_offset))
elif type(node.func) is ast.Name:
self.closure_fn_name = node.func.id
elif type(node.func) is ast.Attribute:
self.closure_fn_name = attribute_reader().visit(node.func)
else:
raise NotImplementedError("Unexpected type of function call.")
self.state = self.PARAMETER
for i in range(len(node.args)):
arg = node.args[i]
if type(arg) is ast.Name and arg.id in self.input_arg_names:
self.positional_args += [Parameter(arg.id)]
else:
try:
expression_validator(self.input_arg_names).visit(arg)
# try to evaluate the ast
result = eval(compile(ast.Expression(arg), '<string>', 'eval'), self.caller_globals)
except:
raise NotImplementedError("Only simple expressions not using the function arguments are permitted")
self.positional_args += [result]
# keyword arguments next
keywordargs = {i.arg:i.value for i in node.keywords}
for i in keywordargs:
arg = keywordargs[i]
if type(arg) is ast.Name and arg.id in self.input_arg_names:
self.named_args[i] = Parameter(arg.id)
else:
try:
expression_validator(self.input_arg_names).visit(arg)
# try to evaluate the ast
result = eval(compile(ast.Expression(arg), '<string>', 'eval'), self.caller_globals)
except:
raise NotImplementedError("Only simple expressions not using the function arguments are permitted")
self.named_args[i] = result
def visit_arguments(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected function")
self.input_arg_names = [arg.id for arg in node.args]
def visit_Name(self, node):
raise NotImplementedError("Unexpected name")
def visit_Return(self, node):
if (self.state != self.INNER_CALL):
raise NotImplementedError("Unexpected return")
return self.visit(node.value)
def visit_Lambda(self, node):
return self.visit_FunctionDef(node)
def visit_FunctionDef(self, node):
if (self.state != self.FUNCTION):
raise NotImplementedError("Unexpected function")
self.visit(node.args)
self.state = self.INNER_CALL
if type(node.body) is list:
next_node = node.body[0]
            # there is an annoying condition: if there is a doc string, it
            # actually shows up in the ast as an Expr wrapping a Str,
            # so we need to catch that and skip it
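            # e.g. (illustrative) for "def f(x):\n    'doc'\n    return g(x)",
            # the FunctionDef body is [Expr(value=Str(s='doc')), Return(...)],
            # so body[0] would be the doc string rather than the call we want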
try:
if type(next_node) is ast.Expr and type(next_node.value) is ast.Str:
# this is *probably* a doc string!
next_node = node.body[1]
except:
# just in case the above fails for various reasons like say...
# there is *only* a doc string. We still fail with the
# appropriate error
pass
else:
next_node = node.body
if type(next_node) is ast.Call:
self.visit(next_node)
elif type(next_node) is ast.Return and type(next_node.value) is ast.Call:
self.visit(next_node.value)
else:
raise NotImplementedError("Function must comprise of just a function call ")
def visit_ClassDef(self, node):
raise NotImplementedError("Classes are not implemented")
def _isalambda(v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
def translate(fn):
visitor = lambda_closure_visitor()
visitor.caller_globals = fn.func_globals.copy()
    # Now, annoyingly enough, certain captures are not in func_globals; we need
    # to look in func_closure for them
if fn.func_closure:
closure = dict(zip(fn.func_code.co_freevars, (c.cell_contents for c in fn.func_closure)))
# inject closure into "caller_globals"
for i in closure:
visitor.caller_globals[i] = closure[i]
ast_node = None
try:
if not _isalambda(fn):
ast_node = ast.parse(inspect.getsource(fn))
except:
pass
try:
if ast_node == None:
ast_node = meta.decompiler.decompile_func(fn)
except:
pass
if ast_node is None:
raise RuntimeError("Cannot process provided function")
visitor.translate_ast(ast_node)
return visitor
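# Hypothetical usage sketch (not part of the original module; "some_fn" and "y"
# are made-up names). Captured values are evaluated eagerly, while lambda
# parameters are kept as Parameter placeholders:
#   y = 5
#   v = translate(lambda x: some_fn(x, y + 1, flag=True))
#   # v.closure_fn_name  -> 'some_fn'
#   # v.positional_args  -> [Parameter('x'), 6]  (prints as [λx, 6])
#   # v.named_args       -> {'flag': True}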
# if __name__ == "__main__":
# if len(sys.argv) <= 1:
# print("Usage:\n\t./Lua_Translator.py <FILENAME>\n")
# exit(-1)
# f = open(sys.argv[1] , 'r')
# l = f.readlines()
# f.close()
# s = ""
#
# for x in l:
# s = s + x
#
# ast_node = ast.parse(s)
#
# f = open(sys.argv[1].rpartition(".")[0] + "_trans.lua", 'w')
# test = translator_NodeVisitor(f)
# test.translate_ast(ast_node)
# f.close()
|
thirdwing/SFrame
|
oss_src/unity/python/sframe/util/lambda_closure_capture.py
|
Python
|
bsd-3-clause
| 10,020
|
[
"VisIt"
] |
1b03a602c13d05e8ffbaf2c7e443594c920f01303893e35ab8937d3ca773b28a
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/client_idle/idle_filter_state.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/rbac/rbac_filter.cc',
'src/core/ext/filters/rbac/rbac_service_config_parser.cc',
'src/core/ext/filters/server_config_selector/server_config_selector.cc',
'src/core/ext/filters/server_config_selector/server_config_selector_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/api/httpbody.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/security.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/extension.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_cluster.cc',
'src/core/ext/xds/xds_common_types.cc',
'src/core/ext/xds/xds_endpoint.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_http_rbac_filter.cc',
'src/core/ext/xds/xds_listener.cc',
'src/core/ext/xds/xds_resource_type.cc',
'src/core/ext/xds/xds_route_config.cc',
'src/core/ext/xds/xds_routing.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_authorization_engine.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/authorization/matchers.cc',
'src/core/lib/security/authorization/rbac_policy.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/channel_creds_registry_init.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_extra.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
'third_party/abseil-cpp/absl/base/internal/cycleclock.cc',
'third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc',
'third_party/abseil-cpp/absl/base/internal/raw_logging.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc',
'third_party/abseil-cpp/absl/base/internal/sysinfo.cc',
'third_party/abseil-cpp/absl/base/internal/thread_identity.cc',
'third_party/abseil-cpp/absl/base/internal/throw_delegate.cc',
'third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc',
'third_party/abseil-cpp/absl/base/log_severity.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc',
'third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc',
'third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc',
'third_party/abseil-cpp/absl/debugging/internal/demangle.cc',
'third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc',
'third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc',
'third_party/abseil-cpp/absl/debugging/stacktrace.cc',
'third_party/abseil-cpp/absl/debugging/symbolize.cc',
'third_party/abseil-cpp/absl/hash/internal/city.cc',
'third_party/abseil-cpp/absl/hash/internal/hash.cc',
'third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc',
'third_party/abseil-cpp/absl/numeric/int128.cc',
'third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc',
'third_party/abseil-cpp/absl/random/discrete_distribution.cc',
'third_party/abseil-cpp/absl/random/gaussian_distribution.cc',
'third_party/abseil-cpp/absl/random/internal/pool_urbg.cc',
'third_party/abseil-cpp/absl/random/internal/randen.cc',
'third_party/abseil-cpp/absl/random/internal/randen_detect.cc',
'third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc',
'third_party/abseil-cpp/absl/random/internal/randen_round_keys.cc',
'third_party/abseil-cpp/absl/random/internal/randen_slow.cc',
'third_party/abseil-cpp/absl/random/internal/seed_material.cc',
'third_party/abseil-cpp/absl/random/seed_gen_exception.cc',
'third_party/abseil-cpp/absl/random/seed_sequences.cc',
'third_party/abseil-cpp/absl/status/status.cc',
'third_party/abseil-cpp/absl/status/status_payload_printer.cc',
'third_party/abseil-cpp/absl/status/statusor.cc',
'third_party/abseil-cpp/absl/strings/ascii.cc',
'third_party/abseil-cpp/absl/strings/charconv.cc',
'third_party/abseil-cpp/absl/strings/cord.cc',
'third_party/abseil-cpp/absl/strings/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_internal.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_handle.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_info.cc',
'third_party/abseil-cpp/absl/strings/internal/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/memutil.cc',
'third_party/abseil-cpp/absl/strings/internal/ostringstream.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/output.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc',
'third_party/abseil-cpp/absl/strings/internal/utf8.cc',
'third_party/abseil-cpp/absl/strings/match.cc',
'third_party/abseil-cpp/absl/strings/numbers.cc',
'third_party/abseil-cpp/absl/strings/str_cat.cc',
'third_party/abseil-cpp/absl/strings/str_replace.cc',
'third_party/abseil-cpp/absl/strings/str_split.cc',
'third_party/abseil-cpp/absl/strings/string_view.cc',
'third_party/abseil-cpp/absl/strings/substitute.cc',
'third_party/abseil-cpp/absl/synchronization/barrier.cc',
'third_party/abseil-cpp/absl/synchronization/blocking_counter.cc',
'third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc',
'third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc',
'third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc',
'third_party/abseil-cpp/absl/synchronization/internal/waiter.cc',
'third_party/abseil-cpp/absl/synchronization/mutex.cc',
'third_party/abseil-cpp/absl/synchronization/notification.cc',
'third_party/abseil-cpp/absl/time/civil_time.cc',
'third_party/abseil-cpp/absl/time/clock.cc',
'third_party/abseil-cpp/absl/time/duration.cc',
'third_party/abseil-cpp/absl/time/format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc',
'third_party/abseil-cpp/absl/time/time.cc',
'third_party/abseil-cpp/absl/types/bad_optional_access.cc',
'third_party/abseil-cpp/absl/types/bad_variant_access.cc',
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
'third_party/cares/cares/src/lib/ares__close_sockets.c',
'third_party/cares/cares/src/lib/ares__get_hostent.c',
'third_party/cares/cares/src/lib/ares__parse_into_addrinfo.c',
'third_party/cares/cares/src/lib/ares__read_line.c',
'third_party/cares/cares/src/lib/ares__readaddrinfo.c',
'third_party/cares/cares/src/lib/ares__sortaddrinfo.c',
'third_party/cares/cares/src/lib/ares__timeval.c',
'third_party/cares/cares/src/lib/ares_android.c',
'third_party/cares/cares/src/lib/ares_cancel.c',
'third_party/cares/cares/src/lib/ares_create_query.c',
'third_party/cares/cares/src/lib/ares_data.c',
'third_party/cares/cares/src/lib/ares_destroy.c',
'third_party/cares/cares/src/lib/ares_expand_name.c',
'third_party/cares/cares/src/lib/ares_expand_string.c',
'third_party/cares/cares/src/lib/ares_fds.c',
'third_party/cares/cares/src/lib/ares_free_hostent.c',
'third_party/cares/cares/src/lib/ares_free_string.c',
'third_party/cares/cares/src/lib/ares_freeaddrinfo.c',
'third_party/cares/cares/src/lib/ares_getaddrinfo.c',
'third_party/cares/cares/src/lib/ares_getenv.c',
'third_party/cares/cares/src/lib/ares_gethostbyaddr.c',
'third_party/cares/cares/src/lib/ares_gethostbyname.c',
'third_party/cares/cares/src/lib/ares_getnameinfo.c',
'third_party/cares/cares/src/lib/ares_getsock.c',
'third_party/cares/cares/src/lib/ares_init.c',
'third_party/cares/cares/src/lib/ares_library_init.c',
'third_party/cares/cares/src/lib/ares_llist.c',
'third_party/cares/cares/src/lib/ares_mkquery.c',
'third_party/cares/cares/src/lib/ares_nowarn.c',
'third_party/cares/cares/src/lib/ares_options.c',
'third_party/cares/cares/src/lib/ares_parse_a_reply.c',
'third_party/cares/cares/src/lib/ares_parse_aaaa_reply.c',
'third_party/cares/cares/src/lib/ares_parse_caa_reply.c',
'third_party/cares/cares/src/lib/ares_parse_mx_reply.c',
'third_party/cares/cares/src/lib/ares_parse_naptr_reply.c',
'third_party/cares/cares/src/lib/ares_parse_ns_reply.c',
'third_party/cares/cares/src/lib/ares_parse_ptr_reply.c',
'third_party/cares/cares/src/lib/ares_parse_soa_reply.c',
'third_party/cares/cares/src/lib/ares_parse_srv_reply.c',
'third_party/cares/cares/src/lib/ares_parse_txt_reply.c',
'third_party/cares/cares/src/lib/ares_platform.c',
'third_party/cares/cares/src/lib/ares_process.c',
'third_party/cares/cares/src/lib/ares_query.c',
'third_party/cares/cares/src/lib/ares_search.c',
'third_party/cares/cares/src/lib/ares_send.c',
'third_party/cares/cares/src/lib/ares_strcasecmp.c',
'third_party/cares/cares/src/lib/ares_strdup.c',
'third_party/cares/cares/src/lib/ares_strerror.c',
'third_party/cares/cares/src/lib/ares_strsplit.c',
'third_party/cares/cares/src/lib/ares_timeout.c',
'third_party/cares/cares/src/lib/ares_version.c',
'third_party/cares/cares/src/lib/ares_writev.c',
'third_party/cares/cares/src/lib/bitncmp.c',
'third_party/cares/cares/src/lib/inet_net_pton.c',
'third_party/cares/cares/src/lib/inet_ntop.c',
'third_party/cares/cares/src/lib/windows_port.c',
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
'third_party/upb/third_party/utf8_range/naive.c',
'third_party/upb/third_party/utf8_range/range2-neon.c',
'third_party/upb/third_party/utf8_range/range2-sse.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
ASM_SOURCE_FILES = {
'crypto_ios_aarch64': [
'third_party/boringssl-with-bazel/ios-aarch64/crypto/chacha/chacha-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/aesv8-armx64.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/armv8-mont.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha1-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha256-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha512-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/vpaes-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/test/trampoline-armv8.S',
],
'crypto_ios_arm': [
'third_party/boringssl-with-bazel/ios-arm/crypto/chacha/chacha-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/aesv8-armx32.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/armv4-mont.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/bsaes-armv7.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/ghash-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/ghashv8-armx32.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha1-armv4-large.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha256-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha512-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/vpaes-armv7.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/test/trampoline-armv4.S',
],
'crypto_linux_aarch64': [
'third_party/boringssl-with-bazel/linux-aarch64/crypto/chacha/chacha-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/armv8-mont.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha1-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha256-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha512-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/test/trampoline-armv8.S',
],
'crypto_linux_arm': [
'third_party/boringssl-with-bazel/linux-arm/crypto/chacha/chacha-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/aesv8-armx32.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/armv4-mont.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/bsaes-armv7.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/ghash-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/ghashv8-armx32.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha1-armv4-large.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha256-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha512-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/vpaes-armv7.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/test/trampoline-armv4.S',
'third_party/boringssl-with-bazel/src/crypto/curve25519/asm/x25519-asm-arm.S',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm_asm.S',
],
'crypto_linux_ppc64le': [
'third_party/boringssl-with-bazel/linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S',
'third_party/boringssl-with-bazel/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S',
'third_party/boringssl-with-bazel/linux-ppc64le/crypto/test/trampoline-ppc.S',
],
'crypto_linux_x86': [
'third_party/boringssl-with-bazel/linux-x86/crypto/chacha/chacha-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/aesni-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/bn-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/co-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/ghash-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/md5-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha1-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha256-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha512-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/vpaes-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/x86-mont.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/test/trampoline-x86.S',
],
'crypto_linux_x86_64': [
'third_party/boringssl-with-bazel/linux-x86_64/crypto/chacha/chacha-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/aesni-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/ghash-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/md5-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/rsaz-avx2.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha1-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha256-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha512-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/x86_64-mont.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/x86_64-mont5.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/test/trampoline-x86_64.S',
'third_party/boringssl-with-bazel/src/crypto/hrss/asm/poly_rq_mul.S',
],
'crypto_mac_x86': [
'third_party/boringssl-with-bazel/mac-x86/crypto/chacha/chacha-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/aesni-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/bn-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/co-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/ghash-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/md5-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha1-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha256-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha512-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/vpaes-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/x86-mont.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/test/trampoline-x86.S',
],
'crypto_mac_x86_64': [
'third_party/boringssl-with-bazel/mac-x86_64/crypto/chacha/chacha-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/aesni-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/ghash-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/md5-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/rsaz-avx2.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha1-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha256-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha512-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/x86_64-mont.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/x86_64-mont5.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/test/trampoline-x86_64.S',
],
'crypto_win_aarch64': [
'third_party/boringssl-with-bazel/win-aarch64/crypto/chacha/chacha-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/aesv8-armx64.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/armv8-mont.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha1-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha256-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha512-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/vpaes-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/test/trampoline-armv8.S',
],
'crypto_win_x86': [
'third_party/boringssl-with-bazel/win-x86/crypto/chacha/chacha-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/aesni-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/bn-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/co-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/ghash-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/md5-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha1-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha256-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha512-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/vpaes-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/x86-mont.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/test/trampoline-x86.asm',
],
'crypto_win_x86_64': [
'third_party/boringssl-with-bazel/win-x86_64/crypto/chacha/chacha-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/aesni-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/ghash-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/md5-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/rsaz-avx2.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha1-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha256-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha512-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/x86_64-mont.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/x86_64-mont5.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/test/trampoline-x86_64.asm',
],
}
|
grpc/grpc
|
src/python/grpcio/grpc_core_dependencies.py
|
Python
|
apache-2.0
| 88,503
|
[
"ORCA"
] |
54f03fd95a43cf1e9e4e918dbe69146d412e068506bfc441cff762263751d50c
|
# -*- coding: utf-8 -*-
"""Some utility functions"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import warnings
import logging
import time
from distutils.version import LooseVersion
import os
import os.path as op
from functools import wraps
import inspect
from string import Formatter
import subprocess
import sys
import tempfile
import shutil
from shutil import rmtree
from math import log, ceil
import json
import ftplib
import hashlib
from functools import partial
import atexit
import numpy as np
from scipy import linalg, sparse
from .externals.six.moves import urllib
from .externals.six import string_types, StringIO, BytesIO
from .externals.decorator import decorator
from .fixes import isclose
logger = logging.getLogger('mne')  # a single logger instance shared across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
def _memory_usage(*args, **kwargs):
if isinstance(args[0], tuple):
args[0][0](*args[0][1], **args[0][2])
elif not isinstance(args[0], int): # can be -1 for current use
args[0]()
return [-1]
try:
from memory_profiler import memory_usage
except ImportError:
memory_usage = _memory_usage
def nottest(f):
"""Decorator to mark a function as not a test"""
f.__test__ = False
return f
###############################################################################
# RANDOM UTILITIES
def _get_call_line(in_verbose=False):
"""Helper to get the call line from within a function"""
# XXX Eventually we could auto-triage whether in a `verbose` decorated
# function or not.
# NB This probably only works for functions that are undecorated,
# or decorated by `verbose`.
back = 2 if not in_verbose else 4
call_frame = inspect.getouterframes(inspect.currentframe())[back][0]
return inspect.getframeinfo(call_frame).code_context[0].strip()
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_hash(x, h=None):
"""Hash a reasonable python object
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if isinstance(x, dict):
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, (list, tuple)):
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (string_types, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, np.ndarray):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tostring())
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
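# --- Illustrative sketch (editor addition, not part of the original module):
# a minimal example of what object_hash provides -- a deterministic digest for
# nested builtins and ndarrays, independent of dict key order.  The helper name
# `_example_object_hash` is hypothetical; it relies on the module-level `np`
# import above and is never executed at import time.
def _example_object_hash():
    x = {'a': [1, 2.0, 'three'], 'b': np.arange(4)}
    y = {'b': np.arange(4), 'a': [1, 2.0, 'three']}  # same content, other order
    assert object_hash(x) == object_hash(y)  # keys are sorted before hashing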
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as a.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' x1 missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' x2 missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for xx1, xx2 in zip(a, b):
out += object_diff(xx1, xx2, pre='')
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' a is None, b is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
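# --- Illustrative sketch (editor addition, not from the original source):
# object_diff returns an empty string for equal inputs and a human-readable
# description otherwise.  The helper name `_example_object_diff` is
# hypothetical; it uses the module-level `np` import and is not run at import.
def _example_object_diff():
    a = {'x': np.zeros(3), 'y': [1, 2]}
    b = {'x': np.zeros(3), 'y': [1, 2]}
    assert object_diff(a, b) == ''
    b['y'] = [1, 3]
    assert 'mismatch' in object_diff(a, b)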
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
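# --- Illustrative sketch (editor addition): the three accepted seed types all
# yield a RandomState; anything else raises ValueError.  `_example_check_random_state`
# is a hypothetical helper, never called at import time.
def _example_check_random_state():
    rng = check_random_state(42)           # int -> new seeded RandomState
    assert isinstance(rng, np.random.RandomState)
    assert check_random_state(rng) is rng   # RandomState instances pass through
    assert check_random_state(None) is np.random.mtrand._rand  # global state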
def split_list(l, n):
"""split list in n (approx) equal pieces"""
n = int(n)
sz = len(l) // n
for i in range(n - 1):
yield l[i * sz:(i + 1) * sz]
yield l[(n - 1) * sz:]
def create_chunks(sequence, size):
"""Generate chunks from a sequence
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
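# --- Illustrative sketch (editor addition, not part of the original API):
# how split_list and create_chunks partition a sequence.  The helper name
# `_example_chunking` is hypothetical and the function is never run at import.
def _example_chunking():
    data = list(range(10))
    assert [list(p) for p in split_list(data, 3)] == \
        [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    assert [list(c) for c in create_chunks(data, 4)] == \
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]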
def sum_squared(X):
"""Compute norm of an array
Parameters
----------
X : array
Data whose sum of squares is needed
Returns
-------
value : float
Sum of squares of the input array X
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
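# --- Illustrative sketch (editor addition): sum_squared(X) equals
# np.sum(X ** 2) but avoids the temporary squared array by using a flat dot
# product.  `_example_sum_squared` is a hypothetical helper, not called at import.
def _example_sum_squared():
    X = np.arange(6.).reshape(2, 3)
    assert np.isclose(sum_squared(X), np.sum(X ** 2))  # both give 55.0 here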
def check_fname(fname, filetype, endings):
"""Enforce MNE filename conventions
Parameters
----------
fname : str
Name of the file.
filetype : str
Type of file. e.g., ICA, Epochs etc.
endings : tuple
Acceptable endings for the filename.
"""
print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
if not fname.endswith(endings):
warnings.warn('This filename (%s) does not conform to MNE naming '
'conventions. All %s files should end with '
'%s' % (fname, filetype, print_endings))
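# --- Illustrative sketch (editor addition, hedged): check_fname warns but does
# not raise when a filename lacks one of the accepted suffixes.  The helper name
# `_example_check_fname` and the endings shown are hypothetical; not run at import.
def _example_check_fname():
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        check_fname('data.txt', 'ICA', ('-ica.fif', '-ica.fif.gz'))
    assert len(w) == 1  # one naming-convention warning was emitted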
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout"""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
def __del__(self):
rmtree(self._path, ignore_errors=True)
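# --- Illustrative sketch (editor addition): a _TempDir instance behaves like
# the str path of a fresh temporary directory and removes it when the last
# reference is dropped.  `_example_tempdir` is a hypothetical helper relying on
# the module-level `op` import; it is never executed at import time.
def _example_tempdir():
    tmp = _TempDir()
    assert op.isdir(tmp)                  # the instance *is* the directory path
    open(op.join(tmp, 'probe.txt'), 'w').close()
    assert op.isfile(op.join(tmp, 'probe.txt'))
    del tmp  # dropping the last reference triggers cleanup via __del__ (CPython)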
def estimate_rank(data, tol=1e-4, return_singular=False,
norm=True, copy=True):
"""Helper to estimate the rank of data
This function will normalize the rows of the data (typically
channels or vertices) such that non-zero singular values
should be close to one.
Parameters
----------
data : array
Data to estimate the rank of (should be 2-dimensional).
tol : float
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
norm : bool
If True, data will be scaled by their estimated row-wise norm.
Else data are assumed to be scaled. Defaults to True.
copy : bool
If False, values in data will be modified in-place during
rank estimation (saves memory).
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
if copy is True:
data = data.copy()
if norm is True:
norms = _compute_row_norms(data)
data /= norms[:, np.newaxis]
s = linalg.svd(data, compute_uv=False, overwrite_a=True)
rank = np.sum(s >= tol)
if return_singular is True:
return rank, s
else:
return rank
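# --- Illustrative sketch (editor addition, a hedged example rather than
# original code): a matrix whose rows span a 2-D subspace is reported as rank 2
# with the default tolerance.  `_example_estimate_rank` is a hypothetical
# helper using the module-level `np` import; never called at import time.
def _example_estimate_rank():
    rng = np.random.RandomState(0)
    basis = rng.randn(2, 20)
    data = np.dot(rng.randn(5, 2), basis)  # 5 rows spanning a 2-D subspace
    assert estimate_rank(data) == 2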
def _compute_row_norms(data):
"""Compute scaling based on estimated norm"""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude
"""
from .epochs import _is_good
from .io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
class _FormatDict(dict):
"""Helper for pformat()"""
def __missing__(self, key):
return "{" + key + "}"
def pformat(temp, **fmt):
"""Partially format a template string.
Examples
--------
>>> pformat("{a}_{b}", a='x')
'x_{b}'
"""
formatter = Formatter()
mapping = _FormatDict(fmt)
return formatter.vformat(temp, (), mapping)
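# --- Illustrative sketch (editor addition): pformat substitutes only the
# fields it is given and leaves the rest as literal placeholders.  The field
# names below are arbitrary; `_example_pformat` is hypothetical and not run at
# import time.
def _example_pformat():
    assert pformat("{subject}-{run}.fif", subject='sub01') == 'sub01-{run}.fif'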
def trait_wraith(*args, **kwargs):
# Stand in for traits to allow importing traits based modules when the
# traits library is not installed
return lambda x: x
###############################################################################
# DECORATORS
# Following deprecated class copied from scikit-learn
# force show of DeprecationWarning even on python 2.7
warnings.simplefilter('default')
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses::
>>> from mne.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<mne.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra: string
To be added to the deprecation messages.
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
# scikit-learn will not import on all platforms b/c it can be
# sklearn or scikits.learn, so a self-contained example is used above
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
"""Call
Parameters
----------
obj : object
Object to call.
"""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def deprecation_wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = deprecation_wrapped
deprecation_wrapped.__name__ = '__init__'
deprecation_wrapped.__doc__ = self._update_doc(init.__doc__)
deprecation_wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def deprecation_wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
deprecation_wrapped.__name__ = fun.__name__
deprecation_wrapped.__dict__ = fun.__dict__
deprecation_wrapped.__doc__ = self._update_doc(fun.__doc__)
return deprecation_wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
@decorator
def verbose(function, *args, **kwargs):
"""Improved verbose decorator to allow functions to override log-level
Do not call this directly to set global verbosity level, instead use
set_log_level().
Parameters
----------
function : function
Function to be decorated by setting the verbosity level.
Returns
-------
dec : function
The decorated function
"""
arg_names = inspect.getargspec(function).args
default_level = verbose_level = None
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
if 'verbose' in arg_names:
verbose_level = args[arg_names.index('verbose')]
elif 'verbose' in kwargs:
verbose_level = kwargs.pop('verbose')
# This ensures that object.method(verbose=None) will use object.verbose
verbose_level = default_level if verbose_level is None else verbose_level
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
return function(*args, **kwargs)
finally:
set_log_level(old_level)
return function(*args, **kwargs)
@nottest
def slow_test(f):
"""Decorator for slow tests"""
f.slow_test = True
return f
@nottest
def ultra_slow_test(f):
"""Decorator for ultra slow tests"""
f.ultra_slow_test = True
f.slow_test = True
return f
def has_nibabel(vox2ras_tkr=False):
"""Determine if nibabel is installed
Parameters
----------
vox2ras_tkr : bool
If True, require nibabel has vox2ras_tkr support.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel
out = True
if vox2ras_tkr: # we need MGHHeader to have vox2ras_tkr param
out = (getattr(getattr(getattr(nibabel, 'MGHImage', 0),
'header_class', 0),
'get_vox2ras_tkr', None) is not None)
return out
except ImportError:
return False
def has_mne_c():
"""Aux function"""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Aux function"""
return 'FREESURFER_HOME' in os.environ
def requires_nibabel(vox2ras_tkr=False):
"""Aux function"""
extra = ' with vox2ras_tkr support' if vox2ras_tkr else ''
return np.testing.dec.skipif(not has_nibabel(vox2ras_tkr),
'Requires nibabel%s' % extra)
def requires_version(library, min_version):
"""Helper for testing"""
return np.testing.dec.skipif(not check_version(library, min_version),
'Requires %s version >= %s'
% (library, min_version))
def requires_module(function, name, call):
"""Decorator to skip test if package is not available"""
try:
from nose.plugins.skip import SkipTest
except ImportError:
SkipTest = AssertionError
@wraps(function)
def dec(*args, **kwargs):
skip = False
try:
exec(call) in globals(), locals()
except Exception:
skip = True
if skip is True:
raise SkipTest('Test %s skipped, requires %s'
% (function.__name__, name))
return function(*args, **kwargs)
return dec
_pandas_call = """
import pandas
version = LooseVersion(pandas.__version__)
if version < '0.8.0':
raise ImportError
"""
_sklearn_call = """
required_version = '0.14'
import sklearn
version = LooseVersion(sklearn.__version__)
if version < required_version:
raise ImportError
"""
_sklearn_0_15_call = """
required_version = '0.15'
import sklearn
version = LooseVersion(sklearn.__version__)
if version < required_version:
raise ImportError
"""
_mayavi_call = """
from mayavi import mlab
mlab.options.backend = 'test'
"""
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
_fs_or_ni_call = """
if not has_nibabel() and not has_freesurfer():
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
requires_sklearn = partial(requires_module, name='sklearn', call=_sklearn_call)
requires_sklearn_0_15 = partial(requires_module, name='sklearn',
call=_sklearn_0_15_call)
requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
requires_freesurfer = partial(requires_module, name='Freesurfer',
call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_fs_or_nibabel = partial(requires_module, name='nibabel or Freesurfer',
call=_fs_or_ni_call)
requires_tvtk = partial(requires_module, name='TVTK',
call='from tvtk.api import tvtk')
requires_statsmodels = partial(requires_module, name='statsmodels',
call='import statsmodels')
requires_patsy = partial(requires_module, name='patsy',
call='import patsy')
requires_pysurfer = partial(requires_module, name='PySurfer',
call='from surfer import Brain')
requires_PIL = partial(requires_module, name='PIL',
call='from PIL import Image')
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime',
call='import nitime')
requires_traits = partial(requires_module, name='traits',
call='import traits')
requires_h5py = partial(requires_module, name='h5py', call='import h5py')
def check_version(library, min_version):
"""Check minimum library version required
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\\d+ | [a-z]+ | \\.)'``
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
this_version = LooseVersion(library.__version__)
if this_version < min_version:
ok = False
return ok
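# Editor's usage sketch (illustrative, not part of the original module; assumes
# NumPy is installed and at least at version 1.0):
#     check_version('numpy', '1.0')            # -> True
#     check_version('no_such_package', '0.1')  # -> False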
def _check_mayavi_version(min_version='4.3.0'):
"""Helper for mayavi"""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
@verbose
def run_subprocess(command, verbose=None, *args, **kwargs):
"""Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
    By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
for stdxxx, sys_stdxxx in (['stderr', sys.stderr],
['stdout', sys.stdout]):
if stdxxx not in kwargs:
kwargs[stdxxx] = subprocess.PIPE
elif kwargs[stdxxx] is sys_stdxxx:
if isinstance(sys_stdxxx, StringIO):
# nose monkey patches sys.stderr and sys.stdout to StringIO
kwargs[stdxxx] = subprocess.PIPE
else:
kwargs[stdxxx] = sys_stdxxx
# Check the PATH environment variable. If run_subprocess() is to be called
# frequently this should be refactored so as to only check the path once.
env = kwargs.get('env', os.environ)
if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
msg = ("Your PATH environment variable contains at least one path "
"starting with a tilde ('~') character. Such paths are not "
"interpreted correctly from within Python. It is recommended "
"that you use '$HOME' instead of '~'.")
warnings.warn(msg)
logger.info("Running subprocess: %s" % ' '.join(command))
try:
p = subprocess.Popen(command, *args, **kwargs)
except Exception:
logger.error('Command not found: %s' % (command[0],))
raise
stdout_, stderr = p.communicate()
stdout_ = '' if stdout_ is None else stdout_.decode('utf-8')
stderr = '' if stderr is None else stderr.decode('utf-8')
if stdout_.strip():
logger.info("stdout:\n%s" % stdout_)
if stderr.strip():
logger.info("stderr:\n%s" % stderr)
output = (stdout_, stderr)
if p.returncode:
print(output)
err_fun = subprocess.CalledProcessError.__init__
if 'output' in inspect.getargspec(err_fun).args:
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
###############################################################################
# LOGGING
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
logger = logging.getLogger('mne')
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
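# Editor's usage sketch (illustrative, not part of the original module): the
# return value lets callers restore the previous level, mirroring how the
# ``verbose`` decorator above uses it:
#     old_level = set_log_level('WARNING', return_old_level=True)
#     try:
#         pass  # run something without INFO-level chatter
#     finally:
#         set_log_level(old_level)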
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
https://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
logger = logging.getLogger('mne')
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
###############################################################################
# CONFIG / PREFS
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
return subjects_dir
_temp_home_dir = None
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)"""
global _temp_home_dir
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
home_dir = os.getenv('APPDATA')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_config_path(home_dir=None):
"""Get path to standard mne-python config file
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%APPDATA%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir: str or None
Directory to use for temporary file storage. None disables
temporary file storage.
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir)
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing
Parameters
----------
memmap_min_size: str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, string_types):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size)
# List the known configuration values
known_config_types = [
'MNE_BROWSE_RAW_SIZE',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_LOGGING_LEVEL',
'MNE_USE_CUDA',
'SUBJECTS_DIR',
'MNE_CACHE_DIR',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS'
]
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is an okay key
known_config_wildcards = [
'MNE_STIM_CHANNEL',
]
def get_config(key=None, default=None, raise_error=False, home_dir=None):
"""Read mne(-python) preference from env, then mne-python config
Parameters
----------
key : None | str
        The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in the path are returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
"""
if key is not None and not isinstance(key, string_types):
raise TypeError('key must be a string')
# first, check to see if key is in env
if key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
key_found = False
val = default
else:
with open(config_path, 'r') as fid:
config = json.load(fid)
if key is None:
return config
key_found = key in config
val = config.get(key, default)
if not key_found and raise_error is True:
meth_1 = 'os.environ["%s"] = VALUE' % key
meth_2 = 'mne.utils.set_config("%s", VALUE)' % key
raise KeyError('Key "%s" not found in environment or in the '
'mne-python config file: %s '
'Try either:'
' %s for a temporary solution, or:'
' %s for a permanent one. You can also '
'set the environment variable before '
'running python.'
% (key, config_path, meth_1, meth_2))
return val
def set_config(key, value, home_dir=None):
"""Set mne-python preference in config
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
See Also
--------
get_config
"""
if not isinstance(key, string_types):
raise TypeError('key must be a string')
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
if not isinstance(value, string_types) and value is not None:
raise TypeError('value must be a string or None')
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warnings.warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
with open(config_path, 'r') as fid:
config = json.load(fid)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
else:
config[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
class ProgressBar(object):
"""Class for generating a command-line progressbar
Parameters
----------
max_value : int
Maximum value of process (e.g. number of samples to process, bytes to
download, etc.).
initial_value : int
Initial value of process, useful when resuming process from a specific
value, defaults to 0.
mesg : str
Message to include at end of progress bar.
max_chars : int
Number of characters to use for progress bar (be sure to save some room
for the message and % complete as well).
progress_character : char
Character in the progress bar that indicates the portion completed.
spinner : bool
Show a spinner. Useful for long-running processes that may not
increment the progress bar very often. This provides the user with
feedback that the progress has not stalled.
Example
-------
>>> progress = ProgressBar(13000)
>>> progress.update(3000) # doctest: +SKIP
[......... ] 23.07692 |
>>> progress.update(6000) # doctest: +SKIP
[.................. ] 46.15385 |
>>> progress = ProgressBar(13000, spinner=True)
>>> progress.update(3000) # doctest: +SKIP
[......... ] 23.07692 |
>>> progress.update(6000) # doctest: +SKIP
[.................. ] 46.15385 /
"""
spinner_symbols = ['|', '/', '-', '\\']
template = '\r[{0}{1}] {2:.05f} {3} {4} '
def __init__(self, max_value, initial_value=0, mesg='', max_chars=40,
progress_character='.', spinner=False, verbose_bool=True):
self.cur_value = initial_value
self.max_value = float(max_value)
self.mesg = mesg
self.max_chars = max_chars
self.progress_character = progress_character
self.spinner = spinner
self.spinner_index = 0
self.n_spinner = len(self.spinner_symbols)
self._do_print = verbose_bool
def update(self, cur_value, mesg=None):
"""Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
# Ensure floating-point division so we can get fractions of a percent
# for the progressbar.
self.cur_value = cur_value
progress = min(float(self.cur_value) / self.max_value, 1.)
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
# Update the message
if mesg is not None:
self.mesg = mesg
# The \r tells the cursor to return to the beginning of the line rather
# than starting a new line. This allows us to have a progressbar-style
# display in the console window.
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
# Force a flush because sometimes when using bash scripts and pipes,
# the output is not printed until after the program exits.
if self._do_print:
sys.stdout.write(bar)
sys.stdout.flush()
        # Increment the spinner
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
def update_with_increment_value(self, increment_value, mesg=None):
"""Update progressbar with the value of the increment instead of the
current value of process as in update()
Parameters
----------
increment_value : int
Value of the increment of process. The percent of the progressbar
will be computed as
(self.cur_value + increment_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
self.cur_value += increment_value
self.update(self.cur_value, mesg)
def _chunk_read(response, local_file, initial_size=0, verbose_bool=True):
"""Download a file chunk by chunk and show advancement
Can also be used when resuming downloads over http.
Parameters
----------
response: urllib.response.addinfourl
Response to the download request in order to get file size.
local_file: file
Hard disk file where data should be written.
initial_size: int, optional
If resuming, indicate the initial size of the file.
Notes
-----
The chunk size will be automatically adapted based on the connection
speed.
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
# Returns only amount left to download when resuming, not the size of the
# entire file
total_size = int(response.headers.get('Content-Length', '1').strip())
total_size += initial_size
progress = ProgressBar(total_size, initial_value=initial_size,
max_chars=40, spinner=True, mesg='downloading',
verbose_bool=verbose_bool)
chunk_size = 8192 # 2 ** 13
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.001:
chunk_size *= 2
elif dt > 0.5 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
if verbose_bool:
sys.stdout.write('\n')
sys.stdout.flush()
break
_chunk_write(chunk, local_file, progress)
def _chunk_read_ftp_resume(url, temp_file_name, local_file, verbose_bool=True):
"""Resume downloading of a file from an FTP server"""
# Adapted from: https://pypi.python.org/pypi/fileDownloader.py
# but with changes
parsed_url = urllib.parse.urlparse(url)
file_name = os.path.basename(parsed_url.path)
server_path = parsed_url.path.replace(file_name, "")
unquoted_server_path = urllib.parse.unquote(server_path)
local_file_size = os.path.getsize(temp_file_name)
data = ftplib.FTP()
if parsed_url.port is not None:
data.connect(parsed_url.hostname, parsed_url.port)
else:
data.connect(parsed_url.hostname)
data.login()
if len(server_path) > 1:
data.cwd(unquoted_server_path)
data.sendcmd("TYPE I")
data.sendcmd("REST " + str(local_file_size))
down_cmd = "RETR " + file_name
file_size = data.size(file_name)
progress = ProgressBar(file_size, initial_value=local_file_size,
max_chars=40, spinner=True, mesg='downloading',
verbose_bool=verbose_bool)
# Callback lambda function that will be passed the downloaded data
# chunk and will write it to file and update the progress bar
def chunk_write(chunk):
return _chunk_write(chunk, local_file, progress)
data.retrbinary(down_cmd, chunk_write)
data.close()
sys.stdout.write('\n')
sys.stdout.flush()
def _chunk_write(chunk, local_file, progress):
"""Write a chunk to file and update the progress bar"""
local_file.write(chunk)
progress.update_with_increment_value(len(chunk))
@verbose
def _fetch_file(url, file_name, print_destination=True, resume=True,
hash_=None, verbose=None):
"""Load requested file, downloading it if needed or requested
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
print_destination: bool, optional
If true, destination of where file was saved will be printed after
download finishes.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
if hash_ is not None and (not isinstance(hash_, string_types) or
len(hash_) != 32):
raise ValueError('Bad hash value given, should be a 32-character '
'string:\n%s' % (hash_,))
temp_file_name = file_name + ".part"
local_file = None
initial_size = 0
verbose_bool = (logger.level <= 20) # 20 is info
try:
# Checking file size and displaying it alongside the download url
u = urllib.request.urlopen(url, timeout=10.)
try:
file_size = int(u.headers.get('Content-Length', '1').strip())
finally:
u.close()
del u
logger.info('Downloading data from %s (%s)\n'
% (url, sizeof_fmt(file_size)))
# Downloading data
if resume and os.path.exists(temp_file_name):
local_file = open(temp_file_name, "ab")
# Resuming HTTP and FTP downloads requires different procedures
scheme = urllib.parse.urlparse(url).scheme
if scheme in ('http', 'https'):
local_file_size = os.path.getsize(temp_file_name)
# If the file exists, then only download the remainder
req = urllib.request.Request(url)
req.headers["Range"] = "bytes=%s-" % local_file_size
try:
data = urllib.request.urlopen(req)
except Exception:
# There is a problem that may be due to resuming, some
# servers may not support the "Range" header. Switch back
# to complete download method
logger.info('Resuming download failed. Attempting to '
'restart downloading the entire file.')
local_file.close()
_fetch_file(url, file_name, resume=False)
else:
_chunk_read(data, local_file, initial_size=local_file_size,
verbose_bool=verbose_bool)
data.close()
del data # should auto-close
else:
_chunk_read_ftp_resume(url, temp_file_name, local_file,
verbose_bool=verbose_bool)
else:
local_file = open(temp_file_name, "wb")
data = urllib.request.urlopen(url)
try:
_chunk_read(data, local_file, initial_size=initial_size,
verbose_bool=verbose_bool)
finally:
data.close()
del data # should auto-close
# temp file must be closed prior to the move
if not local_file.closed:
local_file.close()
# check md5sum
if hash_ is not None:
logger.info('Verifying download hash.')
md5 = md5sum(temp_file_name)
if hash_ != md5:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, md5))
shutil.move(temp_file_name, file_name)
if print_destination is True:
logger.info('File saved as %s.\n' % file_name)
except Exception as e:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
logger.error("Error: %s", e)
raise
finally:
if local_file is not None:
if not local_file.closed:
local_file.close()
def sizeof_fmt(num):
    """Turn number of bytes into a human-readable string"""
    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
    decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = '{0:.%sf} {1}' % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
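# Editor's illustration (not part of the original module): the number of
# decimals grows with the unit, e.g.
#     sizeof_fmt(2048)     # -> '2 kB'
#     sizeof_fmt(1572864)  # -> '1.5 MB'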
def _url_to_local_path(url, path):
"""Mirror a url path in a local destination (keeping folder structure)"""
destination = urllib.parse.urlparse(url).path
# First char should be '/', and it needs to be discarded
if len(destination) < 2 or destination[0] != '/':
raise ValueError('Invalid URL')
destination = os.path.join(path,
urllib.request.url2pathname(destination)[1:])
return destination
def _get_stim_channel(stim_channel, info):
"""Helper to determine the appropriate stim_channel
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
are read. If these are not found, it will fall back to 'STI 014' if
present, then fall back to the first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
if not isinstance(stim_channel, string_types):
raise TypeError('stim_channel must be a str, list, or None')
stim_channel = [stim_channel]
if not all(isinstance(s, string_types) for s in stim_channel):
raise TypeError('stim_channel list must contain all strings')
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
while(ch is not None and ch in info['ch_names']):
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI 014' in info['ch_names']:
return ['STI 014']
from .io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
return stim_channel
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
def _check_fname(fname, overwrite):
"""Helper to check for file existence"""
if not isinstance(fname, string_types):
raise TypeError('file name is not a string')
if op.isfile(fname):
if not overwrite:
raise IOError('Destination file exists. Please use option '
'"overwrite=True" to force overwriting.')
else:
logger.info('Overwriting existing file.')
def _check_subject(class_subject, input_subject, raise_error=True):
"""Helper to get subject name from class"""
if input_subject is not None:
if not isinstance(input_subject, string_types):
raise ValueError('subject input must be a string')
else:
return input_subject
elif class_subject is not None:
if not isinstance(class_subject, string_types):
raise ValueError('Neither subject input nor class subject '
'attribute was a string')
else:
return class_subject
else:
if raise_error is True:
raise ValueError('Neither subject input nor class subject '
'attribute was a string')
return None
def _check_pandas_installed():
"""Aux function"""
try:
import pandas as pd
return pd
except ImportError:
raise RuntimeError('For this method to work the Pandas library is'
' required.')
def _check_pandas_index_arguments(index, defaults):
""" Helper function to check pandas index arguments """
if not any(isinstance(index, k) for k in (list, tuple)):
index = [index]
invalid_choices = [e for e in index if e not in defaults]
if invalid_choices:
options = [', '.join(e) for e in [invalid_choices, defaults]]
        raise ValueError('[%s] is not a valid option. Valid index '
                         'values are \'None\' or %s' % tuple(options))
def _clean_names(names, remove_whitespace=False, before_dash=True):
""" Remove white-space on topo matching
This function handles different naming
conventions for old VS new VectorView systems (`remove_whitespace`).
    It also allows removing system-specific parts of CTF channel names
(`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_virtual'):
name = name[:-8]
cleaned.append(name)
return cleaned
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
bad_names = ['MovedModule'] # this is in six.py, and causes bad things
for mod in list(sys.modules.values()):
if mod.__class__.__name__ not in bad_names and hasattr(mod, reg):
getattr(mod, reg).clear()
# hack to deal with old scipy/numpy in tests
if os.getenv('TRAVIS') == 'true' and sys.version.startswith('2.6'):
warnings.simplefilter('default')
try:
np.rank([])
except Exception:
pass
warnings.simplefilter('always')
def _check_type_picks(picks):
"""helper to guarantee type integrity of picks"""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
if not all(isinstance(i, int) for i in picks):
raise ValueError(err_msg)
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise ValueError(err_msg)
else:
raise ValueError(err_msg)
return picks
@nottest
def run_tests_if_main(measure_mem=False):
"""Run tests in a given file if it is run as a script"""
local_vars = inspect.currentframe().f_back.f_locals
if not local_vars.get('__name__', '') == '__main__':
return
# we are in a "__main__"
try:
import faulthandler
faulthandler.enable()
except Exception:
pass
with warnings.catch_warnings(record=True): # memory_usage internal dep.
mem = int(round(max(memory_usage(-1)))) if measure_mem else -1
if mem >= 0:
print('Memory consumption after import: %s' % mem)
t0 = time.time()
peak_mem, peak_name = mem, 'import'
max_elapsed, elapsed_name = 0, 'N/A'
count = 0
for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):
val = local_vars[name]
if name.startswith('_'):
continue
elif callable(val) and name.startswith('test'):
count += 1
doc = val.__doc__.strip() if val.__doc__ else name
sys.stdout.write('%s ... ' % doc)
sys.stdout.flush()
try:
t1 = time.time()
if measure_mem:
with warnings.catch_warnings(record=True): # dep warn
mem = int(round(max(memory_usage((val, (), {})))))
else:
val()
mem = -1
if mem >= peak_mem:
peak_mem, peak_name = mem, name
mem = (', mem: %s MB' % mem) if mem >= 0 else ''
elapsed = int(round(time.time() - t1))
if elapsed >= max_elapsed:
max_elapsed, elapsed_name = elapsed, name
sys.stdout.write('time: %s sec%s\n' % (elapsed, mem))
sys.stdout.flush()
except Exception as err:
if 'skiptest' in err.__class__.__name__.lower():
sys.stdout.write('SKIP (%s)\n' % str(err))
sys.stdout.flush()
else:
raise
elapsed = int(round(time.time() - t0))
sys.stdout.write('Total: %s tests\n• %s sec (%s sec for %s)\n• Peak memory'
' %s MB (%s)\n' % (count, elapsed, max_elapsed,
elapsed_name, peak_mem, peak_name))
class ArgvSetter(object):
"""Temporarily set sys.argv"""
def __init__(self, args=(), disable_stdout=True, disable_stderr=True):
self.argv = list(('python',) + args)
self.stdout = StringIO() if disable_stdout else sys.stdout
self.stderr = StringIO() if disable_stderr else sys.stderr
def __enter__(self):
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args):
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
def md5sum(fname, block_size=1048576): # 2 ** 20
"""Calculate the md5sum for a file
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
Returns
-------
hash_ : str
        The hexadecimal digest of the hash.
"""
md5 = hashlib.md5()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def _sphere_to_cartesian(theta, phi, r):
"""Transform spherical coordinates to cartesian"""
z = r * np.sin(phi)
rcos_phi = r * np.cos(phi)
x = rcos_phi * np.cos(theta)
y = rcos_phi * np.sin(theta)
return x, y, z
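# Editor's note (illustrative, not part of the original module): phi is the
# elevation measured from the xy-plane, so phi=0 stays in the plane and
# phi=pi/2 points along +z. For example, up to floating point error:
#     _sphere_to_cartesian(theta=0.0, phi=0.0, r=1.0)        # -> (1.0, 0.0, 0.0)
#     _sphere_to_cartesian(theta=np.pi / 2, phi=0.0, r=2.0)  # -> (0.0, 2.0, 0.0)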
def create_slices(start, stop, step=None, length=1):
""" Generate slices of time indexes
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
length : int
        Number of time samples included in a given slice.
step: int | None
Number of time samples separating two slices.
If step = None, step = length.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
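# Editor's illustration (not part of the original module): windows of length 3
# over indices 0..9, first non-overlapping, then with a hop of 2 samples:
#     create_slices(0, 10, length=3)
#     # -> [slice(0, 3, 1), slice(3, 6, 1), slice(6, 9, 1)]
#     create_slices(0, 10, step=2, length=3)
#     # -> [slice(0, 3, 1), slice(2, 5, 1), slice(4, 7, 1), slice(6, 9, 1)]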
def _time_mask(times, tmin=None, tmax=None, strict=False):
"""Helper to safely find sample boundaries"""
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
mask = (times >= tmin)
mask &= (times <= tmax)
if not strict:
mask |= isclose(times, tmin)
mask |= isclose(times, tmax)
return mask
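# Editor's illustration (not part of the original module): with strict=False the
# isclose() terms keep boundary samples that would otherwise be lost to floating
# point rounding (e.g. 3 * 0.1 is slightly greater than 0.3):
#     times = np.arange(5) * 0.1                # [0.0, 0.1, 0.2, 0.3, 0.4]
#     _time_mask(times, tmin=0.1, tmax=0.3)     # [False, True, True, True, False]
#     _time_mask(times, 0.1, 0.3, strict=True)  # [False, True, True, False, False]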
def _get_fast_dot():
""""Helper to get fast dot"""
try:
from sklearn.utils.extmath import fast_dot
except ImportError:
fast_dot = np.dot
return fast_dot
def random_permutation(n_samples, random_state=None):
"""Helper to emulate the randperm matlab function.
It returns a vector containing a random permutation of the
integers between 0 and n_samples-1. It returns the same random numbers
    as the randperm matlab function whenever the random_state is the same
    as matlab's random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) function generates
a random sequence between 0 and n_samples-1, that is:
randperm(n_samples) = random_permutation(n_samples, random_state) - 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
random_state : int | None
Random seed for initializing the pseudo-random number generator.
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n-1.
"""
rng = check_random_state(random_state)
idx = rng.rand(n_samples)
randperm = np.argsort(idx)
return randperm
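# Editor's usage sketch (illustrative, not part of the original module): with a
# fixed random_state the permutation is reproducible, and adding 1 yields the
# 1-based sequence that matlab's randperm(n) would produce for the same seed:
#     perm = random_permutation(5, random_state=0)  # 0-based permutation of 0..4
#     matlab_style = perm + 1                       # 1-based, randperm-compatible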
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix"""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
fast_dot = _get_fast_dot()
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
# if covariance matrix is fully expanded, Y needs a
# transpose / broadcasting else Y is correct
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (fast_dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/mne/utils.py
|
Python
|
bsd-3-clause
| 63,668
|
[
"Mayavi"
] |
956311f5d5e1d4f0ea09151272d28ddaf30ca56ce8d005ad70e248ad0d6e9ca1
|
# Copyright 2004-2008 by M de Hoon.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Implements the Lowess function for nonparametric regression.
Functions:
lowess Fit a smooth nonparametric regression curve to a scatterplot.
For more information, see
William S. Cleveland: "Robust locally weighted regression and smoothing
scatterplots", Journal of the American Statistical Association, December 1979,
volume 74, number 368, pp. 829-836.
William S. Cleveland and Susan J. Devlin: "Locally weighted regression: An
approach to regression analysis by local fitting", Journal of the American
Statistical Association, September 1988, volume 83, number 403, pp. 596-610.
"""
from __future__ import print_function
from Bio._py3k import range
import numpy
try:
from Bio.Cluster import median
# The function median in Bio.Cluster is faster than the function median
# in NumPy, as it does not require a full sort.
except ImportError as x:
# Use the median function in NumPy if Bio.Cluster is not available
from numpy import median
def lowess(x, y, f=2. / 3., iter=3):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
x and y should be numpy float arrays of equal length. The return value is
also a numpy float array of that length.
e.g.
>>> import numpy
>>> x = numpy.array([4, 4, 7, 7, 8, 9, 10, 10, 10, 11, 11, 12, 12, 12,
... 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16,
... 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 20, 20, 20, 20,
... 20, 22, 23, 24, 24, 24, 24, 25], numpy.float)
>>> y = numpy.array([2, 10, 4, 22, 16, 10, 18, 26, 34, 17, 28, 14, 20, 24,
... 28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
... 32, 40, 50, 42, 56, 76, 84, 36, 46, 68, 32, 48, 52, 56,
... 64, 66, 54, 70, 92, 93, 120, 85], numpy.float)
>>> result = lowess(x, y)
>>> len(result)
50
>>> print("[%0.2f, ..., %0.2f]" % (result[0], result[-1]))
[4.85, ..., 84.98]
"""
n = len(x)
r = int(numpy.ceil(f * n))
h = [numpy.sort(abs(x - x[i]))[r] for i in range(n)]
w = numpy.clip(abs(([x] - numpy.transpose([x])) / h), 0.0, 1.0)
w = 1 - w * w * w
w = w * w * w
yest = numpy.zeros(n)
delta = numpy.ones(n)
for iteration in range(iter):
for i in range(n):
weights = delta * w[:, i]
weights_mul_x = weights * x
b1 = numpy.dot(weights, y)
b2 = numpy.dot(weights_mul_x, y)
A11 = sum(weights)
A12 = sum(weights_mul_x)
A21 = A12
A22 = numpy.dot(weights_mul_x, x)
determinant = A11 * A22 - A12 * A21
beta1 = (A22 * b1 - A12 * b2) / determinant
beta2 = (A11 * b2 - A21 * b1) / determinant
yest[i] = beta1 + beta2 * x[i]
residuals = y - yest
s = median(abs(residuals))
delta[:] = numpy.clip(residuals / (6 * s), -1, 1)
delta[:] = 1 - delta * delta
delta[:] = delta * delta
return yest
def _test():
"""Run the Bio.Statistics.lowess module's doctests."""
print("Running doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
_test()
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Statistics/lowess.py
|
Python
|
gpl-2.0
| 4,003
|
[
"Biopython"
] |
516c8f1b2995a41e2c205992870674b7e8f1d2a23a930af90f1131e67d5d4149
|
# Copyright 2005 by Jonathan Taylor.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This module deals with CAPS markers.
A CAPS marker is a location a DifferentialCutsite as described below and a
set of primers that can be used to visualize this. More information can
be found in the paper located at:
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=8106085&dopt=Abstract
Copyright Jonathan Taylor 2005
"""
class DifferentialCutsite(object):
"""A differential cutsite is a location in an alignment where an enzyme cuts
at least one sequence and also cannot cut at least one other sequence.
Members:
start Where it lives in the alignment.
enzyme The enzyme that causes this.
cuts_in A list of sequences (as indexes into the alignment) the
enzyme cuts in.
blocked_in A list of sequences (as indexes into the alignment) the
enzyme is blocked in.
"""
def __init__(self, **kwds):
"""Initialize a DifferentialCutsite.
Each member (as listed in the class description) should be included as a
keyword.
"""
self.start = int(kwds["start"])
self.enzyme = kwds["enzyme"]
self.cuts_in = kwds["cuts_in"]
self.blocked_in = kwds["blocked_in"]
class AlignmentHasDifferentLengthsError(Exception):
pass
class CAPSMap(object):
"""A map of an alignment showing all possible dcuts.
Members:
alignment The alignment that is mapped.
dcuts A list of possible CAPS markers in the form of
DifferentialCutsites.
"""
def __init__(self, alignment, enzymes = []):
"""Initialize the CAPSMap
Required:
alignment The alignment to be mapped.
Optional:
enzymes The enzymes to be used to create the map.
"""
self.sequences = [rec.seq for rec in alignment]
self.size = len(self.sequences)
self.length = len(self.sequences[0])
for seq in self.sequences:
if len(seq) != self.length:
raise AlignmentHasDifferentLengthsError
self.alignment = alignment
self.enzymes = enzymes
# look for dcuts
self._digest()
def _digest_with(self, enzyme):
cuts = {}
all = []
# go through each sequence
for seq in self.sequences:
# grab all the cuts in the sequence
cuts[seq] = [cut - enzyme.fst5 for cut in enzyme.search(seq)]
# maintain a list of all cuts in all sequences
all.extend(cuts[seq])
# we sort the all list and remove duplicates
all.sort()
last = -999
new = []
for cut in all:
if cut != last:
new.append(cut)
last = cut
all = new
# all now has indices for all sequences in the alignment
for cut in all:
# test for dcuts
cuts_in = []
blocked_in = []
for i in range(0, self.size):
seq = self.sequences[i]
if cut in cuts[seq]:
cuts_in.append(i)
else:
blocked_in.append(i)
if cuts_in != [] and blocked_in != []:
self.dcuts.append(DifferentialCutsite(start = cut, enzyme = enzyme, cuts_in = cuts_in, blocked_in = blocked_in))
def _digest(self):
self.dcuts = []
for enzyme in self.enzymes:
self._digest_with(enzyme)
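# Editor's usage sketch (hypothetical file name; not part of the original
# module). Restriction enzymes from Bio.Restriction expose search() and fst5,
# which is what _digest_with() relies on:
#     from Bio import AlignIO
#     from Bio.Restriction import EcoRI, HindIII
#     alignment = AlignIO.read("example_alignment.fasta", "fasta")
#     caps_map = CAPSMap(alignment, enzymes=[EcoRI, HindIII])
#     for dcut in caps_map.dcuts:
#         print(dcut.enzyme, dcut.start, dcut.cuts_in, dcut.blocked_in)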
|
bryback/quickseq
|
genescript/Bio/CAPS/__init__.py
|
Python
|
mit
| 3,733
|
[
"Biopython"
] |
535ea304cfe8566b0d8e6970482c9216299f15dbb4014da0fe2cd577ead7bcaf
|
import rnftools
from .Source import *
import os
import snakemake
import re
class DwgSim(Source):
"""Class for DWGsim (https://github.com/nh13/DWGSIM/wiki).
Both single-end and paired-end simulations are supported. In paired-end simulations,
reads can have different lengths. Note that there is a bug in DWGsim documentation:
coordinates are 1-based.
Args:
fasta (str): File name of the genome from which reads are created (FASTA file).
sequences (set of int or str): FASTA sequences to extract. Sequences can be specified either by their ids, or by their names.
        coverage (float): Average coverage of the genome (if number_of_read_tuples specified,
then it must be equal to zero).
Corresponding DWGsim parameter: ``-C``.
number_of_read_tuples (int): Number of read tuples (if coverage specified, then
it must be equal to zero).
Corresponding DWGsim parameter: ``-N``.
read_length_1 (int): Length of the first read.
Corresponding DWGsim parameter: ``-1``.
read_length_2 (int): Length of the second read (if zero, then single-end
simulation performed).
Corresponding DWGsim parameter: ``-2``.
distance (int): Mean inner distance between reads.
Corresponding DWGsim parameter: ``-d``.
distance_deviation (int): Standard deviation of inner distances between both reads.
Corresponding DWGsim parameter: ``-s``.
rng_seed (int): Seed for simulator's random number generator.
Corresponding DWGsim parameter: ``-z``.
        haploid_mode (bool): Simulate reads in haploid mode.
Corresponding DWGsim parameter: ``-H``.
error_rate_1 (float): Sequencing error rate in the first read.
Corresponding DWGsim parameter: ``-e``.
error_rate_2 (float): Sequencing error rate in the second read.
Corresponding DWGsim parameter: ``-E``.
mutation_rate (float): Mutation rate.
            Corresponding DWGsim parameter: ``-r``.
indels (float): Rate of indels in mutations.
Corresponding DWGsim parameter: ``-R``.
prob_indel_ext (float): Probability that an indel is extended.
Corresponding DWGsim parameter: ``-X``.
estimate_unknown_values (bool): Estimate unknown values (coordinates missing in
DWGsim output).
other_params (str): Other parameters which are used on command-line.
vcf (str): File name of the list of mutations (VCF output of DWGSIM).
Raises:
ValueError
"""
def __init__(
self,
fasta,
sequences=None,
coverage=0,
number_of_read_tuples=0,
read_length_1=100,
read_length_2=0,
distance=500,
distance_deviation=50.0,
rng_seed=1,
haploid_mode=False,
error_rate_1=0.020,
error_rate_2=0.020,
mutation_rate=0.001,
indels=0.15,
prob_indel_ext=0.3,
estimate_unknown_values=False,
other_params="",
vcf=None,
):
if read_length_2 == 0:
ends = 1
else:
ends = 2
self.distance = distance
self.distance_deviation = distance_deviation
super().__init__(
fasta=fasta,
sequences=sequences,
reads_in_tuple=ends,
rng_seed=rng_seed,
)
self.read_length_1 = read_length_1
self.read_length_2 = read_length_2
self.other_params = other_params
coverage = float(coverage)
number_of_read_tuples = int(number_of_read_tuples)
if coverage * number_of_read_tuples != 0:
rnftools.utils.error(
"coverage or number_of_read_tuples must be equal to zero",
program="RNFtools",
subprogram="MIShmash",
exception=ValueError,
)
self.number_of_read_tuples = number_of_read_tuples
self.coverage = coverage
self.haploid_mode = haploid_mode
self.error_rate_1 = error_rate_1
self.error_rate_2 = error_rate_2
self.mutation_rate = mutation_rate
self.indels = indels
self.prob_indel_ext = prob_indel_ext
self.estimate_unknown_values = estimate_unknown_values
self.vcf = vcf
self.dwg_prefix = os.path.join(
self.get_dir(),
"dwgsim_files.{}.{}".format("se" if self.number_of_read_tuples == 1 else "pe", self.genome_id)
)
def get_input(self):
return [
self._fa_fn,
self._fai_fn,
]
def get_output(self):
return [
self.dwg_prefix + ".bwa.read1.fastq",
self.dwg_prefix + ".bwa.read2.fastq",
self.dwg_prefix + ".bfast.fastq",
self.dwg_prefix + ".mutations.vcf",
self.dwg_prefix + ".mutations.txt",
self._fq_fn,
]
def create_fq(self):
if self.coverage == 0 and self.number_of_read_tuples == 0:
for x in self.get_output():
with open(x, "w+") as f:
f.write(os.linesep)
else:
if self.number_of_read_tuples == 0:
genome_size = os.stat(self._fa_fn).st_size
self.number_of_read_tuples = int(
self.coverage * genome_size / (self.read_length_1 + self.read_length_2)
)
if self._reads_in_tuple == 2:
paired_params = "-d {dist} -s {dist_dev}".format(
dist=self.distance,
dist_dev=self.distance_deviation,
)
else:
paired_params = ""
rnftools.utils.shell(
"""
"{dwgsim}" \
-1 {rlen1} \
-2 {rlen2} \
-z {rng_seed} \
-y 0 \
-N {nb} \
-e {error_rate_1} \
-E {error_rate_2} \
-r {mutation_rate} \
-R {indels} \
-X {prob_indel_ext} \
{haploid} \
{paired_params} \
{other_params} \
"{fa}" \
"{pref}" \
> /dev/null
""".format(
dwgsim="dwgsim",
fa=self._fa_fn,
pref=self.dwg_prefix,
nb=self.number_of_read_tuples,
rlen1=self.read_length_1,
rlen2=self.read_length_2,
other_params=self.other_params,
paired_params=paired_params,
rng_seed=self._rng_seed,
haploid="-H" if self.haploid_mode else "",
error_rate_1=self.error_rate_1,
error_rate_2=self.error_rate_2,
mutation_rate=self.mutation_rate,
indels=self.indels,
prob_indel_ext=self.prob_indel_ext,
)
)
with open(self._fq_fn, "w+") as fastq_fo:
with open(self._fai_fn) as fai_fo:
self.recode_dwgsim_reads(
dwgsim_prefix=self.dwg_prefix,
fastq_rnf_fo=fastq_fo,
fai_fo=fai_fo,
genome_id=self.genome_id,
number_of_read_tuples=10**9,
# allow_unmapped=False,
estimate_unknown_values=self.estimate_unknown_values,
)
if self.vcf is not None:
snakemake.shell("find .")
dwgsim_vcf = "{}.mutations.vcf".format(self.dwg_prefix)
snakemake.shell("cp '{}' '{}'".format(dwgsim_vcf, self.vcf))
@staticmethod
def recode_dwgsim_reads(
dwgsim_prefix,
fastq_rnf_fo,
fai_fo,
genome_id,
estimate_unknown_values,
number_of_read_tuples=10**9,
):
"""Convert DwgSim FASTQ file to RNF FASTQ file.
Args:
dwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).
fastq_rnf_fo (file): File object of RNF FASTQ.
fai_fo (file): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
estimate_unknown_values (bool): Estimate unknown values (right coordinate of each end).
number_of_read_tuples (int): Estimate of number of simulated read tuples (to set width).
"""
dwgsim_pattern = re.compile(
'@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)'
)
###
# DWGSIM read name format
#
        # 1) contig name (chromosome name)
# 2) start end 1 (one-based)
# 3) start end 2 (one-based)
# 4) strand end 1 (0 - forward, 1 - reverse)
# 5) strand end 2 (0 - forward, 1 - reverse)
# 6) random read end 1 (0 - from the mutated reference, 1 - random)
# 7) random read end 2 (0 - from the mutated reference, 1 - random)
# 8) number of sequencing errors end 1 (color errors for colorspace)
# 9) number of SNPs end 1
# 10) number of indels end 1
# 11) number of sequencing errors end 2 (color errors for colorspace)
# 12) number of SNPs end 2
# 13) number of indels end 2
# 14) read number (unique within a given contig/chromosome)
###
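        # Editor's illustration (made-up read name, not from the original file):
        # a name matching dwgsim_pattern above looks like
        #     @chr1_10462_10631_0_1_0_0_2:0:0_1:0:0_a
        # i.e. contig chr1, end 1 starting at 10462 on the forward strand, end 2
        # at 10631 on the reverse strand, 2 sequencing errors on end 1, 1 on
        # end 2, and read number 0xa within the contig.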
fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
# parsing FQ file
read_tuple_id = 0
last_read_tuple_name = None
old_fq = "{}.bfast.fastq".format(dwgsim_prefix)
fq_creator = rnftools.rnfformat.FqCreator(
fastq_fo=fastq_rnf_fo,
read_tuple_id_width=read_tuple_id_width,
genome_id_width=2,
chr_id_width=fai_index.chr_id_width,
coor_width=fai_index.coor_width,
info_reads_in_tuple=True,
info_simulator="dwgsim",
)
i = 0
with open(old_fq, "r+") as f1:
for line in f1:
if i % 4 == 0:
read_tuple_name = line[1:].strip()
if read_tuple_name != last_read_tuple_name:
new_tuple = True
if last_read_tuple_name is not None:
read_tuple_id += 1
else:
new_tuple = False
last_read_tuple_name = read_tuple_name
m = dwgsim_pattern.search(line)
if m is None:
rnftools.utils.error(
"Read tuple '{}' was not created by DwgSim.".format(line[1:]),
program="RNFtools",
subprogram="MIShmash",
exception=ValueError,
)
contig_name = m.group(1)
start_1 = int(m.group(2))
start_2 = int(m.group(3))
direction_1 = "F" if int(m.group(4)) == 0 else "R"
direction_2 = "F" if int(m.group(5)) == 0 else "R"
# random_1 = bool(m.group(6))
# random_2 = bool(m.group(7))
# seq_err_1 = int(m.group(8))
# snp_1 = int(m.group(9))
# indels_1 = int(m.group(10))
# seq_err_2 = int(m.group(11))
# snp_2 = int(m.group(12))
# indels_2 = int(m.group(13))
# read_tuple_id_dwg = int(m.group(14), 16)
chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else "0"
elif i % 4 == 1:
bases = line.strip()
if new_tuple:
segment = rnftools.rnfformat.Segment(
genome_id=genome_id,
chr_id=chr_id,
direction=direction_1,
left=start_1,
right=start_1 + len(bases) - 1 if estimate_unknown_values else 0,
)
else:
segment = rnftools.rnfformat.Segment(
genome_id=genome_id,
chr_id=chr_id,
direction=direction_2,
left=start_2,
right=start_2 + len(bases) - 1 if estimate_unknown_values else 0,
)
elif i % 4 == 2:
pass
elif i % 4 == 3:
qualities = line.strip()
fq_creator.add_read(
read_tuple_id=read_tuple_id,
bases=bases,
qualities=qualities,
segments=[segment],
)
i += 1
fq_creator.flush_read_tuple()
|
karel-brinda/rnftools
|
rnftools/mishmash/DwgSim.py
|
Python
|
mit
| 12,821
|
[
"BWA"
] |
37bccbe1207a4532a2ebd853c5c599e91a04b3fd3beed0be3615e1e9987f74d5
|
# -*- coding: utf-8 -*-
"""
Small utility to convert p2p format of IP Blocklists to IPSet format.
Usage:
{program} generate [options] BLOCKLIST_URL...
{program} example_restore_ipset_job [options] IPTABLES_NAME IPSET_PATH
{program} example_update_ipset_job [options] IPSET_PATH BLOCKLIST_URL...
{program} -h | --help
{program} --version
Options:
-h --help Shows this screen.
--version Shows version and exits.
-i IPSET_NAME --ipset=IPSET_NAME The name of IPSet set [default: blocklist]
To get IP blocklists please visit https://www.iblocklist.com/
"""
PROGRAM_NAME = "iblocklist2ipset"
VERSION = 0, 0, 1
ATTEMPT_COUNT = 16
TIME_TO_SLEEP = 1
RESTORE_IPSET_JOB_SCRIPT = r"""
ipset restore -f {ipset_filename}
iptables -F {iptables_name}
iptables -A {iptables_name} \
-m state --state NEW \
-m set --match-set {ipset_name} src \
-j REJECT --reject-with icmp-host-unreachable
iptables -A {iptables_name} \
-m state --state NEW \
-m set --match-set {ipset_name} dst \
-j REJECT --reject-with icmp-host-unreachable
""".strip()
UPDATE_IPSET_JOB_SCRIPT = r"""
{progpath} generate --ipset {ipset_name} {urls} > /tmp/{progname}.ipset
mv /tmp/{progname}.ipset {ipset_path}
""".strip()
def get_version():
return ".".join(str(num) for num in VERSION)
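# Editor's sketch (hypothetical values, not part of the original module): the
# job templates above are plain str.format() templates, e.g.
#     print(RESTORE_IPSET_JOB_SCRIPT.format(ipset_filename="/etc/blocklist.ipset",
#                                           iptables_name="INPUT",
#                                           ipset_name="blocklist"))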
|
9seconds/iblocklist2ipset
|
iblocklist2ipset/__init__.py
|
Python
|
mit
| 1,343
|
[
"VisIt"
] |
f570429eac6a112d53452a6c28a6297b00a5b1e9263b9c766a8f6d33f94a16ff
|
#!/usr/bin/python
# Copyright (c) 2013, Thomas Rast <trast@inf.ethz.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Try a simple merge evilness detection.
At this point this is purely tree-based, so it cannot detect evilness
at a hunk level.'''
import sys
import subprocess
import optparse
from collections import defaultdict
usage = '%prog <merge> [ <parent1> <parent2> [--] [<mergebase>...] ]'
description = '''\
Show whether <merge> contains any candidates for file-level evilness.
The remaining args are optional, but the merge base in particular is
expensive to compute so you may want to provide it from a cache.
Works only on 2-parent merges. (Octopus merges are not supposed to be
created from conflicting changes anyway.)'''
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option('--stdin', default=False, action='store_true', dest='stdin',
help='Read arguments from stdin (one set of args per line)')
def get_merge_bases(cmt1, cmt2):
try:
out = subprocess.check_output(['git', 'merge-base', '--all', cmt1, cmt2])
return out.strip().split()
except subprocess.CalledProcessError, e:
# merge-base fails with status 1 if there are no bases
if e.returncode == 1:
return []
raise
def get_parents(commit):
out = subprocess.check_output(['git', 'rev-parse', commit+'^1', commit+'^2'])
return out.strip().split()
def ls_tree(cmt):
'''Call git-diff-tree and parse results
The return value is a sequence with each element of the form
(oldmode, newmode, oldhash, newhash, status, filename).
FIXME: should convert to streaming input'''
p = subprocess.Popen(['git', 'ls-tree', '-r', '-z', cmt],
stdout=subprocess.PIPE)
data = p.stdout.read()
for line in data.split('\0'):
if not line: # last element is empty
continue
meta, filename = line.split('\t', 1)
mode, type, sha = meta.split()
yield (mode, sha, filename)
ret = p.wait()
assert ret == 0
# By convention the null sha1 is used to represent nonexistent files.
# We could use anything here, however.
nonexistent = '0'*40
def dict_ls_tree(cmt):
'''Like ls_tree, but the result is a magic dict {filename:hash}.
The magic part is that it is a defaultdict, returning the
customary "absent" null sha1 if you ask for a file that was not in
that tree.'''
ret = defaultdict(lambda : nonexistent)
for mode, sha, filename in ls_tree(cmt):
ret[filename] = sha
return ret, set(ret.keys())
def find_changed(fileset, tree1, tree2):
ret = set()
for f in fileset:
if tree1[f] != tree2[f]:
ret.add(f)
return ret
def die(fmt, *fmtargs):
sys.stderr.write(fmt % fmtargs)
sys.exit(1)
def detect_evilness(M, A, B, bases):
# History looks like this on a high level:
#
# M
# / \
# A B
# \ /
# Y1, Y2, ...
#
#
# Obviously files are only interesting if A and B do not all have
# the same content (otherwise the merge was trivial).
#
# There are two suspect cases, for any given file:
#
# (1) M agrees with A or B, but neither of them matches any
# merge-base. In this case there should have been a
# nontrivial file-level merge.
#
# (2) M agrees with A (or B), but B (or A, resp.) does not match
# any merge-base.
#
# Actually (1) is a special case of (2). However, I find it helps
# to distinguish them and label them as
# (1) modified in both, took <side>
# (2) modified in <side>, took <other side>
#
# FIXME: need to think about what happens in rename detection
# cases
suspects = []
treeM, filesM = dict_ls_tree(M)
treeA, filesA = dict_ls_tree(A)
treeB, filesB = dict_ls_tree(B)
if bases:
treeY, filesY = zip(*[dict_ls_tree(Y) for Y in bases])
else:
# if the ancestries are disjoint, we pretend as if there was a
# merge base with an empty tree
treeY = [defaultdict(lambda : nonexistent)]
filesY = [set([])]
# We only care about files that are in at least one of M, A and B
files_MAB = filesM.union(filesA).union(filesB)
# and from those, only files that do not agree among all parents
files_changed = (find_changed(files_MAB, treeA, treeM)
| find_changed(files_MAB, treeB, treeM))
# case (1)
for f in files_changed:
if any(treeA[f] == t[f] for t in treeY):
continue
if any(treeB[f] == t[f] for t in treeY):
continue
if treeM[f] == treeA[f]:
suspects.append((f, 'modified in both, took ^1'))
elif treeM[f] == treeB[f]:
suspects.append((f, 'modified in both, took ^2'))
# don't look at the same files again
files_changed.difference_update(f for f,reason in suspects)
# case (2)
def case2_helper(side1, side2, cause):
for f in files_changed:
if side1[f] != treeM[f]:
continue
if any(side2[f] == t[f] for t in treeY):
continue
suspects.append((f, cause))
case2_helper(treeA, treeB, 'modified in ^2, took ^1')
case2_helper(treeB, treeA, 'modified in ^1, took ^2')
suspects.sort()
return suspects
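# Illustrative return value (hypothetical file names):
#   [('README', 'modified in ^1, took ^2'), ('src/foo.c', 'modified in both, took ^1')]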
def process_args(args, unhandled_fatal=True):
if len(args) > 3 and args[3] == '--':
del args[3]
if len(args) < 1:
if not unhandled_fatal:
return
parser.print_usage()
sys.exit(1)
merge = args[0]
parent1 = None
parent2 = None
bases = None
if len(args) > 1:
parent1 = args[1]
if len(args) > 2:
parent2 = args[2]
if len(args) > 3:
bases = args[3:]
if not parent1 or not parent2:
try:
parent1, parent2 = get_parents(merge)
except ValueError:
if not unhandled_fatal:
return
die('%s does not appear to be a merge\n', merge)
if not bases:
bases = get_merge_bases(parent1, parent2)
suspects = detect_evilness(merge, parent1, parent2, bases)
if suspects:
print "commit %s" % merge
print "suspicious merge in files:"
for filename, desc in suspects:
print "\t%-25s\t%s" % (desc, filename)
print
if __name__ == '__main__':
options, args = parser.parse_args()
if options.stdin:
for line in sys.stdin:
args = line.strip().split()
process_args(args, unhandled_fatal=False)
else:
process_args(args)
|
trast/evilmergediff
|
evil-base-treediff.py
|
Python
|
gpl-2.0
| 7,203
|
[
"Octopus"
] |
344b6a33acd7cac8147c56df3e4a000497b480fbf204a288bfe239703eb594b7
|
"""
Executes a set of implementations as a program.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os, sys
from string import Template
from zeroinstall import support
from zeroinstall.injector.model import SafeException, EnvironmentBinding, ExecutableBinding, Command, Dependency
from zeroinstall.injector import namespaces, qdom
from zeroinstall.support import basedir
def do_env_binding(binding, path):
"""Update this process's environment by applying the binding.
@param binding: the binding to apply
@type binding: L{model.EnvironmentBinding}
@param path: the selected implementation
@type path: str"""
if binding.insert is not None and path is None:
# Skip insert bindings for package implementations
logger.debug("not setting %s as we selected a package implementation", binding.name)
return
os.environ[binding.name] = binding.get_value(path,
os.environ.get(binding.name, None))
logger.info("%s=%s", binding.name, os.environ[binding.name])
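# Illustrative (an assumption about 0install's EnvironmentBinding semantics): a typical
# <environment name="PATH" insert="bin"/> binding would prepend "<implementation-path>/bin"
# to the current $PATH; the exact combination rule lives in binding.get_value() above.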
def test_selections(selections, prog_args, dry_run, main):
"""Run the program in a child process, collecting stdout and stderr.
@return: the output produced by the process
@since: 0.27
"""
import tempfile
output = tempfile.TemporaryFile(prefix = '0launch-test')
try:
child = os.fork()
if child == 0:
# We are the child
try:
try:
os.dup2(output.fileno(), 1)
os.dup2(output.fileno(), 2)
execute_selections(selections, prog_args, dry_run, main)
except:
import traceback
traceback.print_exc()
finally:
sys.stdout.flush()
sys.stderr.flush()
os._exit(1)
logger.info(_("Waiting for test process to finish..."))
pid, status = os.waitpid(child, 0)
assert pid == child
output.seek(0)
results = output.read()
if status != 0:
results += _("Error from child process: exit code = %d") % status
finally:
output.close()
return results
def _process_args(args, element):
"""Append each <arg> under <element> to args, performing $-expansion."""
for child in element.childNodes:
if child.uri == namespaces.XMLNS_IFACE and child.name == 'arg':
args.append(Template(child.content).substitute(os.environ))
class Setup(object):
"""@since: 1.2"""
stores = None
selections = None
_exec_bindings = None
_checked_runenv = False
def __init__(self, stores, selections):
"""@param stores: where to find cached implementations
@type stores: L{zerostore.Stores}"""
self.stores = stores
self.selections = selections
def build_command(self, command_iface, command_name, user_command = None):
"""Create a list of strings to be passed to exec to run the <command>s in the selections.
@param command_iface: the interface of the program being run
@type command_iface: str
@param command_name: the name of the command being run
@type command_name: str
@param user_command: a custom command to use instead
@type user_command: L{model.Command}
@return: the argument list
@rtype: [str]"""
if not (command_name or user_command):
raise SafeException(_("Can't run: no command specified!"))
prog_args = []
sels = self.selections.selections
while command_name or user_command:
command_sel = sels[command_iface]
if user_command is None:
command = command_sel.get_command(command_name)
else:
command = user_command
user_command = None
command_args = []
# Add extra arguments for runner
runner = command.get_runner()
if runner:
command_iface = runner.interface
command_name = runner.command
_process_args(command_args, runner.qdom)
else:
command_iface = None
command_name = None
# Add main program path
command_path = command.path
if command_path is not None:
if command_sel.id.startswith('package:'):
prog_path = command_path
else:
if command_path.startswith('/'):
raise SafeException(_("Command path must be relative, but '%s' starts with '/'!") %
command_path)
prog_path = os.path.join(command_sel.get_path(self.stores), command_path)
assert prog_path is not None
if not os.path.exists(prog_path):
raise SafeException(_("File '%(program_path)s' does not exist.\n"
"(implementation '%(implementation_id)s' + program '%(main)s')") %
{'program_path': prog_path, 'implementation_id': command_sel.id,
'main': command_path})
command_args.append(prog_path)
# Add extra arguments for program
_process_args(command_args, command.qdom)
prog_args = command_args + prog_args
# Each command is run by the next, but the last one is run by exec, and we
# need a path for that.
if command.path is None:
raise SafeException("Missing 'path' attribute on <command>")
return prog_args
def prepare_env(self):
"""Do all the environment bindings in the selections (setting os.environ)."""
self._exec_bindings = []
def _do_bindings(impl, bindings, iface):
for b in bindings:
self.do_binding(impl, b, iface)
def _do_deps(deps):
for dep in deps:
dep_impl = sels.get(dep.interface, None)
if dep_impl is None:
assert dep.importance != Dependency.Essential, dep
else:
_do_bindings(dep_impl, dep.bindings, dep.interface)
sels = self.selections.selections
for selection in sels.values():
_do_bindings(selection, selection.bindings, selection.interface)
_do_deps(selection.dependencies)
# Process commands' dependencies' bindings too
for command in selection.get_commands().values():
_do_bindings(selection, command.bindings, selection.interface)
_do_deps(command.requires)
# Do these after <environment>s, because they may do $-expansion
for binding, iface in self._exec_bindings:
self.do_exec_binding(binding, iface)
self._exec_bindings = None
def do_binding(self, impl, binding, iface):
"""Called by L{prepare_env} for each binding.
Sub-classes may wish to override this.
@param impl: the selected implementation
@type impl: L{selections.Selection}
@param binding: the binding to be processed
@type binding: L{model.Binding}
@param iface: the interface containing impl
@type iface: L{model.Interface}
"""
if isinstance(binding, EnvironmentBinding):
if impl.id.startswith('package:'):
path = None # (but still do the binding, e.g. for values)
else:
path = impl.get_path(self.stores)
do_env_binding(binding, path)
elif isinstance(binding, ExecutableBinding):
if isinstance(iface, Dependency):
import warnings
warnings.warn("Pass an interface URI instead", DeprecationWarning, 2)
iface = iface.interface
self._exec_bindings.append((binding, iface))
def do_exec_binding(self, binding, iface):
assert iface is not None
name = binding.name
if '/' in name or name.startswith('.') or "'" in name:
raise SafeException("Invalid <executable> name '%s'" % name)
exec_dir = basedir.save_cache_path(namespaces.config_site, namespaces.config_prog, 'executables', name)
exec_path = os.path.join(exec_dir, name + ".exe" if os.name == "nt" else name)
if os.name != "nt" and not self._checked_runenv:
self._check_runenv()
if not os.path.exists(exec_path):
if os.name == "nt":
# Copy runenv.cli.template to ~/.cache/0install.net/injector/executables/$name/$name
import shutil
shutil.copyfile(os.environ['ZEROINSTALL_CLI_TEMPLATE'], exec_path)
else:
# Symlink ~/.cache/0install.net/injector/executables/$name/$name to runenv.py
os.symlink('../../runenv.py', exec_path)
os.chmod(exec_dir, 0o500)
if binding.in_path:
path = os.environ["PATH"] = exec_dir + os.pathsep + os.environ["PATH"]
logger.info("PATH=%s", path)
else:
os.environ[name] = exec_path
logger.info("%s=%s", name, exec_path)
args = self.build_command(iface, binding.command)
if os.name == "nt":
os.environ["0install-runenv-file-" + name] = args[0]
os.environ["0install-runenv-args-" + name] = support.windows_args_escape(args[1:])
else:
import json
os.environ["0install-runenv-" + name] = json.dumps(args)
def _check_runenv(self):
# Create the runenv.py helper script under ~/.cache if missing or out-of-date
main_dir = basedir.save_cache_path(namespaces.config_site, namespaces.config_prog)
runenv = os.path.join(main_dir, 'runenv.py')
expected_contents = "#!%s\nfrom zeroinstall.injector import _runenv; _runenv.main()\n" % sys.executable
actual_contents = None
if os.path.exists(runenv):
with open(runenv) as s:
actual_contents = s.read()
if actual_contents != expected_contents:
import tempfile
tmp = tempfile.NamedTemporaryFile('w', dir = main_dir, delete = False)
logger.info("Updating %s", runenv)
tmp.write(expected_contents)
tmp.close()
os.chmod(tmp.name, 0o555)
os.rename(tmp.name, runenv)
self._checked_runenv = True
def execute_selections(selections, prog_args, dry_run = False, main = None, wrapper = None, stores = None):
"""Execute program. On success, doesn't return. On failure, raises an Exception.
Returns normally only for a successful dry run.
@param selections: the selected versions
@type selections: L{selections.Selections}
@param prog_args: arguments to pass to the program
@type prog_args: [str]
@param dry_run: if True, just print a message about what would have happened
@type dry_run: bool
@param main: the name of the binary to run, or None to use the default
@type main: str
@param wrapper: a command to use to actually run the binary, or None to run the binary directly
@type wrapper: str
@since: 0.27
@precondition: All implementations are in the cache.
"""
#assert stores is not None
if stores is None:
from zeroinstall import zerostore
stores = zerostore.Stores()
setup = Setup(stores, selections)
commands = selections.commands
if main is not None:
# Replace first command with user's input
if main.startswith('/'):
main = main[1:] # User specified a path relative to the package root
else:
old_path = commands[0].path if commands else None
if not old_path:
raise SafeException(_("Can't use a relative replacement main when there is no original one!"))
main = os.path.join(os.path.dirname(old_path), main) # User main is relative to command's name
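# Illustrative: with an original command path of 'bin/prog' and main='other',
# the resulting path is 'bin/other'.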
# Copy all child nodes (e.g. <runner>) except for the arguments
user_command_element = qdom.Element(namespaces.XMLNS_IFACE, 'command', {'path': main})
if commands:
for child in commands[0].qdom.childNodes:
if child.uri == namespaces.XMLNS_IFACE and child.name == 'arg':
continue
user_command_element.childNodes.append(child)
user_command = Command(user_command_element, None)
else:
user_command = None
setup.prepare_env()
prog_args = setup.build_command(selections.interface, selections.command, user_command) + prog_args
if wrapper:
prog_args = ['/bin/sh', '-c', wrapper + ' "$@"', '-'] + list(prog_args)
if dry_run:
print(_("Would execute: %s") % ' '.join(prog_args))
else:
logger.info(_("Executing: %s"), prog_args)
sys.stdout.flush()
sys.stderr.flush()
try:
env = os.environ.copy()
for x in ['0install-runenv-ZEROINSTALL_GPG', 'ZEROINSTALL_GPG']:
if x in env:
del env[x]
os.execve(prog_args[0], prog_args, env)
except OSError as ex:
raise SafeException(_("Failed to run '%(program_path)s': %(exception)s") % {'program_path': prog_args[0], 'exception': str(ex)})
|
michel-slm/0install
|
zeroinstall/injector/run.py
|
Python
|
lgpl-2.1
| 11,445
|
[
"VisIt"
] |
399dc3901797a8efc70a0743675dfc0d7b4aebd0d039b2159b0826b07a2e7ba3
|
#!/usr/bin/python
"""
pyNEAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
## This example learns XOR using the backprop algorithm included in pyNEAT
import sys
import os.path
import pyNEAT
from pyNEAT.BackPropTester import BackPropTester
pyNEAT.Configuration.printEvery = 100
class XORTest(BackPropTester):
def __init__(self):
BackPropTester.__init__(self, 'BP-XOR')
self.inputs = [[1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
self.targets = [[0.0], [1.0], [1.0], [0.0]]
self.numInputs = len(self.inputs[0])
self.numHidden = 2
self.numOutputs = 1
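# Note: the leading 1.0 in every input row is presumably a constant bias input;
# the remaining two values are the XOR operands matching the targets above.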
def trainTest():
xorTest = XORTest()
xorTest.run(99.5)
def loadTest(loadFile):
xorTest = XORTest()
xorTest.nn = pyNEAT.NeuralNetwork(0)
xorTest.nn.load(loadFile)
xorTest.evaluate(True, False)
if __name__ == '__main__':
loadFile = 'nn.out'
if len(sys.argv) > 1 and sys.argv[1] == 'load' and os.path.exists(loadFile):
loadTest(loadFile)
else:
trainTest()
|
liquidkarma/pyneat
|
examples/xor/xor_bp.py
|
Python
|
gpl-2.0
| 1,714
|
[
"Brian"
] |
7bbcc108517a0e4def215580f3e10ac2f8093d9cd8da7d4f0305a7b7a6b84045
|
from __future__ import with_statement
import copy
import functools
import optparse
import os
import os.path
import re
import sys
try:
from com.xhaus.jyson import JysonCodec as json # jython embedded in buck
except ImportError:
import json # python test case
# TODO(user): upgrade to a jython including os.relpath
def relpath(path, start=os.path.curdir):
"""
Return a relative filepath to path from the current directory or an optional start point.
"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
common = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list) - common) + path_list[common:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
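# Example (illustrative, POSIX paths): relpath('/a/b/c', start='/a/d') -> '../b/c'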
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a dictionary with information about
# the environment of the build file which is currently being processed.
# It contains the following keys:
#
# "BUILD_FILE_DIRECTORY" - The directory containing the build file.
#
# "BASE" - The base path of the build file.
#
# "PROJECT_ROOT" - An absolute path to the project root.
#
# "BUILD_FILE_SYMBOL_TABLE" - The global symbol table of the build file.
BUILD_FUNCTIONS = []
BUILD_RULES_FILE_NAME = 'BUCK'
def provide_for_build(func):
BUILD_FUNCTIONS.append(func)
return func
class LazyBuildEnvPartial:
"""Pairs a function with a build environment in which it should be executed.
Note that although both the function and build environment must be specified
via the constructor, the build environment may be reassigned after
construction.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the underlying
function.
"""
def __init__(self, func, default_build_env):
self.func = func
self.build_env = default_build_env
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({'build_env': self.build_env})
return self.func(*args, **updated_kwargs)
def make_build_file_symbol_table(build_env):
"""Creates a symbol table with functions decorated by @provide_for_build."""
symbol_table = {}
lazy_functions = []
for func in BUILD_FUNCTIONS:
func_with_env = LazyBuildEnvPartial(func, build_env)
symbol_table[func.__name__] = func_with_env.invoke
lazy_functions.append(func_with_env)
return {
'symbol_table': symbol_table,
'lazy_functions': lazy_functions}
def update_lazy_functions(lazy_functions, build_env):
"""Updates a list of LazyBuildEnvPartials with build_env."""
for lazy_function in lazy_functions:
lazy_function.build_env = build_env
def add_rule(rule, build_env):
# Include the base path of the BUILD file so the reader consuming this JSON will know which BUILD
# file the rule came from.
if 'name' not in rule:
raise ValueError('rules must contain the field \'name\'. Found %s.' % rule)
rule_name = rule['name']
if rule_name in build_env['RULES']:
raise ValueError('Duplicate rule definition found. Found %s and %s' %
(rule, build_env['RULES'][rule_name]))
rule['buck.base_path'] = build_env['BASE']
build_env['RULES'][rule_name] = rule
def glob_pattern_to_regex_string(pattern):
# Replace rules for glob pattern (roughly):
# . => \\.
# **/* => (.*)
# * => [^/]*
pattern = re.sub(r'\.', '\\.', pattern)
pattern = pattern.replace('**/*', '(.*)')
# This handles the case when there is a character preceding the asterisk.
pattern = re.sub(r'([^\.])\*', '\\1[^/]*', pattern)
# This handles the case when the asterisk is the first character.
pattern = re.sub(r'^\*', '[^/]*', pattern)
pattern = '^' + pattern + '$'
return pattern
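# Example (illustrative): glob_pattern_to_regex_string('src/**/*.java') -> r'^src/(.*)\.java$'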
def pattern_to_regex(pattern):
pattern = glob_pattern_to_regex_string(pattern)
return re.compile(pattern)
def symlink_aware_walk(base):
""" Recursive symlink aware version of `os.walk`.
Will not traverse a symlink that refers to a previously visited ancestor of
the current directory.
"""
visited_dirs = set()
for entry in os.walk(base, topdown=True, followlinks=True):
(root, dirs, _files) = entry
realdirpath = os.path.realpath(root)
if realdirpath in visited_dirs:
absdirpath = os.path.abspath(root)
if absdirpath.startswith(realdirpath):
dirs[:] = []
continue
visited_dirs.add(realdirpath)
yield entry
raise StopIteration
@provide_for_build
def glob(includes, excludes=[], build_env=None):
search_base = build_env['BUILD_FILE_DIRECTORY']
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(includes, basestring), \
"The first argument to glob() must be a list of strings."
assert not isinstance(excludes, basestring), \
"The excludes argument must be a list of strings."
inclusions = [pattern_to_regex(p) for p in includes]
exclusions = [pattern_to_regex(p) for p in excludes]
def passes_glob_filter(path):
for exclusion in exclusions:
if exclusion.match(path):
return False
for inclusion in inclusions:
if inclusion.match(path):
return True
return False
# Return the filtered set of includes as an array.
paths = []
def check_path(path):
if passes_glob_filter(path):
paths.append(path)
for root, dirs, files in symlink_aware_walk(search_base):
if len(files) == 0:
continue
relative_root = relpath(root, search_base)
# The regexes generated by glob_pattern_to_regex_string don't
# expect a leading './'
if relative_root == '.':
for file_path in files:
check_path(file_path)
else:
relative_root += '/'
for file_path in files:
relative_path = relative_root + file_path
check_path(relative_path)
return paths
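# Illustrative call (hypothetical arguments): glob(['src/**/*.java'],
# excludes=['src/**/*Test.java'], build_env=build_env) returns the matching
# paths relative to the directory containing the BUCK file.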
@provide_for_build
def genfile(src, build_env=None):
return 'BUCKGEN:' + src
@provide_for_build
def java_library(
name,
srcs=[],
resources=[],
export_deps=None,
exported_deps=[],
source='6',
target='6',
proguard_config=None,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'java_library',
'name' : name,
'srcs' : srcs,
'resources' : resources,
# Temporary hack to let repos cut over to new style of exporting deps.
'exported_deps' : deps if export_deps else exported_deps,
'source' : source,
'target' : target,
'proguard_config' : proguard_config,
'deps' : deps + exported_deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def java_test(
name,
srcs=[],
labels=[],
resources=[],
source='6',
target='6',
vm_args=[],
source_under_test=[],
contacts=[],
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'java_test',
'name' : name,
'srcs' : srcs,
'labels': labels,
'resources' : resources,
'source' : source,
'target' : target,
'vm_args' : vm_args,
'source_under_test' : source_under_test,
'contacts' : contacts,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def robolectric_test(
name,
srcs=[],
labels=[],
resources=[],
vm_args=[],
source_under_test=[],
contacts=[],
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'robolectric_test',
'name' : name,
'srcs' : srcs,
'labels': labels,
'resources' : resources,
'vm_args' : vm_args,
'source_under_test' : source_under_test,
'contacts' : contacts,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def java_binary(
name,
main_class=None,
manifest_file=None,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'java_binary',
'name' : name,
'manifest_file': manifest_file,
'main_class' : main_class,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def prebuilt_jar(
name,
binary_jar,
source_jar=None,
javadoc_url=None,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type': 'prebuilt_jar',
'name': name,
'binary_jar': binary_jar,
'source_jar': source_jar,
'javadoc_url': javadoc_url,
'deps': deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def android_library(
name,
srcs=[],
resources=[],
manifest=None,
proguard_config=None,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'android_library',
'name' : name,
'srcs' : srcs,
'resources' : resources,
'manifest' : manifest,
'proguard_config' : proguard_config,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def android_resource(
name,
res=None,
package=None,
assets=None,
manifest=None,
deps=[],
visibility=[],
build_env=None):
if res:
res_srcs = glob([res + '/**/*'], build_env=build_env)
else:
res_srcs = None
if assets:
assets_srcs = glob([assets + '/**/*'], build_env=build_env)
else:
assets_srcs = None
add_rule({
'type' : 'android_resource',
'name' : name,
'res' : res,
'res_srcs' : res_srcs,
'package' : package,
'assets' : assets,
'assets_srcs' : assets_srcs,
'manifest' : manifest,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def prebuilt_native_library(
name,
native_libs=None,
is_asset=False,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'prebuilt_native_library',
'name' : name,
'native_libs' : native_libs,
'is_asset' : is_asset,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def android_binary(
name,
manifest,
target,
keystore,
package_type='debug',
no_dx=[],
use_split_dex=False,
use_linear_alloc_split_dex=False,
minimize_primary_dex_size=False,
disable_pre_dex=False,
dex_compression='jar',
use_android_proguard_config_with_optimizations=False,
proguard_config=None,
resource_compression=None,
primary_dex_substrings=None,
primary_dex_classes_file=None,
# By default, assume we have 5MB of linear alloc,
# 1MB of which is taken up by the framework, so that leaves 4MB.
linear_alloc_hard_limit=4 * 1024 * 1024,
resource_filter=None,
cpu_filters=[],
preprocess_java_classes_deps=[],
preprocess_java_classes_bash=None,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'android_binary',
'name' : name,
'manifest' : manifest,
'target' : target,
'keystore' : keystore,
'package_type' : package_type,
'no_dx' : no_dx,
'use_split_dex': use_split_dex,
'use_linear_alloc_split_dex': use_linear_alloc_split_dex,
'minimize_primary_dex_size': minimize_primary_dex_size,
'disable_pre_dex' : disable_pre_dex,
'dex_compression': dex_compression,
'use_android_proguard_config_with_optimizations':
use_android_proguard_config_with_optimizations,
'proguard_config' : proguard_config,
'resource_compression' : resource_compression,
'primary_dex_substrings' : primary_dex_substrings,
'primary_dex_classes_file' : primary_dex_classes_file,
'linear_alloc_hard_limit' : linear_alloc_hard_limit,
'resource_filter' : resource_filter,
'cpu_filters' : cpu_filters,
'preprocess_java_classes_deps' : preprocess_java_classes_deps,
'preprocess_java_classes_bash' : preprocess_java_classes_bash,
'classpath_deps' : deps,
# Always include the keystore as a dep, as it should be built before this rule.
'deps' : deps + [keystore] + preprocess_java_classes_deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def android_instrumentation_apk(
name,
manifest,
apk,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'android_instrumentation_apk',
'name' : name,
'manifest' : manifest,
'apk' : apk,
'classpath_deps' : deps,
'deps' : deps + [ apk ],
'visibility' : visibility,
}, build_env)
@provide_for_build
def ndk_library(
name,
flags=None,
is_asset=False,
deps=[],
visibility=[],
build_env=None):
EXTENSIONS = ("mk", "h", "hpp", "c", "cpp", "cc", "cxx")
srcs = glob(["**/*.%s" % ext for ext in EXTENSIONS], build_env=build_env)
add_rule({
'type' : 'ndk_library',
'name' : name,
'srcs' : srcs,
'flags' : flags,
'is_asset' : is_asset,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def python_library(
name,
srcs=[],
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'python_library',
'name' : name,
'srcs' : srcs,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def python_binary(
name,
main=None,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'python_binary',
'name' : name,
'main' : main,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def android_manifest(
name,
skeleton,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'android_manifest',
'name' : name,
'skeleton' : skeleton,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def keystore(
name,
store,
properties,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'keystore',
'name' : name,
'store' : store,
'properties' : properties,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def gen_aidl(name, aidl, import_path, deps=[], visibility=[], build_env=None):
add_rule({
'type' : 'gen_aidl',
'name' : name,
'aidl' : aidl,
'import_path' : import_path,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def gen_parcelable(
name,
srcs,
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'gen_parcelable',
'name' : name,
'srcs' : srcs,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def genrule(name,
out,
cmd=None,
bash=None,
cmd_exe=None,
srcs=[],
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'genrule',
'name' : name,
'srcs' : srcs,
'cmd' : cmd,
'bash' : bash,
'cmd_exe' : cmd_exe,
'out' : out,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def apk_genrule(name,
srcs,
apk,
out,
cmd=None,
bash=None,
cmd_exe=None,
deps=[],
visibility=[],
build_env=None):
# Always include the apk as a dep, as it should be built before this rule.
deps = deps + [apk]
add_rule({
'type' : 'apk_genrule',
'name' : name,
'srcs' : srcs,
'apk': apk,
'cmd' : cmd,
'bash' : bash,
'cmd_exe' : cmd_exe,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def sh_binary(
name,
main,
resources=[],
deps=[],
visibility=[],
build_env=None):
add_rule({
'type' : 'sh_binary',
'name' : name,
'main' : main,
'resources' : resources,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def sh_test(name, test, labels=[], deps=[], visibility=[], build_env=None):
add_rule({
'type' : 'sh_test',
'name' : name,
'test' : test,
'labels' : labels,
'deps' : deps,
'visibility' : visibility,
}, build_env)
@provide_for_build
def export_file(name, src=None, out=None, visibility=[], build_env=None):
add_rule({
'type' : 'export_file',
'name' : name,
'src' : src,
'out' : out,
'visibility': visibility,
}, build_env)
@provide_for_build
def include_defs(name, build_env=None):
"""Loads a file in the context of the current build file.
Name must begin with "//" and references a file relative to the project root.
For example, the build file //first-party/orca/orcaapp/BUILD contains
include_defs('//BUILD_DEFS')
which loads a list called NO_DX which can then be used in the build file.
"""
if name[:2] != '//':
raise ValueError('include_defs argument "%s" must begin with //' % name)
relative_path = name[2:]
include_file = os.path.join(build_env['PROJECT_ROOT'], relative_path)
build_env['INCLUDES'].append(include_file)
execfile(include_file, build_env['BUILD_FILE_SYMBOL_TABLE'])
@provide_for_build
def project_config(
src_target=None,
src_roots=[],
test_target=None,
test_roots=[],
is_intellij_plugin=False,
build_env=None):
deps = []
if src_target:
deps.append(src_target)
if test_target:
deps.append(test_target)
add_rule({
'type' : 'project_config',
'name' : 'project_config',
'src_target' : src_target,
'src_roots' : src_roots,
'test_target' : test_target,
'test_roots' : test_roots,
'is_intellij_plugin': is_intellij_plugin,
'deps' : deps,
'visibility' : [],
}, build_env)
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
Returns: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
"""
return build_env['BASE']
@provide_for_build
def add_deps(name, deps=[], build_env=None):
if name not in build_env['RULES']:
raise ValueError('Invoked \'add_deps\' on non-existent rule %s.' % name)
rule = build_env['RULES'][name]
if 'deps' not in rule:
raise ValueError('Invoked \'add_deps\' on rule %s that has no \'deps\' field' % name)
rule['deps'] = rule['deps'] + deps
class BuildFileProcessor:
def __init__(self, project_root, includes, server):
self.project_root = project_root
self.includes = includes
self.server = server
self.len_suffix = -len('/' + BUILD_RULES_FILE_NAME)
# Create root_build_env
build_env = {}
build_env['PROJECT_ROOT'] = self.project_root
build_symbols = make_build_file_symbol_table(build_env)
build_env['BUILD_FILE_SYMBOL_TABLE'] = build_symbols['symbol_table']
build_env['LAZY_FUNCTIONS'] = build_symbols['lazy_functions']
build_env['INCLUDES'] = []
# If there are any default includes, evaluate those first to populate the
# build_env.
for include in self.includes:
include_defs(include, build_env)
self.root_build_env = build_env
def process(self, build_file):
"""Process an individual build file and output JSON of result to stdout."""
# Reset build_env for each build file so that the variables declared in the
# build file or the files in includes through include_defs() don't pollute
# the namespace for subsequent build files.
build_env = copy.copy(self.root_build_env)
relative_path_to_build_file = relpath(build_file, self.project_root).replace('\\', '/')
build_env['BASE'] = relative_path_to_build_file[:self.len_suffix]
build_env['BUILD_FILE_DIRECTORY'] = os.path.dirname(build_file)
build_env['RULES'] = {}
# Copy BUILD_FILE_SYMBOL_TABLE over. This is the only dict that we need
# a separate copy of, since update_lazy_functions will modify it.
build_env['BUILD_FILE_SYMBOL_TABLE'] = copy.copy(
self.root_build_env['BUILD_FILE_SYMBOL_TABLE'])
# Re-apply build_env to the rules added in this file with
# @provide_for_build.
update_lazy_functions(build_env['LAZY_FUNCTIONS'], build_env)
execfile(os.path.join(self.project_root, build_file),
build_env['BUILD_FILE_SYMBOL_TABLE'])
values = build_env['RULES'].values()
values.append({"__includes": [build_file] + build_env['INCLUDES']})
if self.server:
print json.dumps(values)
else:
for value in values:
print json.dumps(value)
# Inexplicably, this script appears to run faster when the arguments passed into it are absolute
# paths. However, we want the "buck.base_path" property printed for each rule to be the
# base path of the build target that identifies the rule. That means that when parsing a BUILD file,
# we must know its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to the project root.
# It must be followed by one or more absolute paths to BUILD files under the project root.
# If no paths to BUILD files are specified, then it will traverse the project root for BUILD files,
# excluding directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUILD files will be printed to stdout by a JSON
# parser. That means that printing out other information for debugging purposes will likely break
# the JSON parsing, so be careful!
def main():
parser = optparse.OptionParser()
parser.add_option('--project_root', action='store', type='string', dest='project_root')
parser.add_option('--include', action='append', dest='include')
parser.add_option('--ignore_path', action='append', dest='ignore_paths')
parser.add_option('--server', action='store_true', dest='server',
help='Invoke as a server to parse individual BUCK files on demand.')
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For example, it might be
# like "C:\project\.\rule". We normalize it in order to make it consistent with ignore_paths.
project_root = os.path.abspath(options.project_root)
build_files = []
if args:
# The user has specified which build files to parse.
build_files = args
elif not options.server:
# Find all of the build files in the project root. Symlinks will not be traversed.
# Search must be done top-down so that directory filtering works as desired.
# options.ignore_paths may contain /, which is needed to be normalized in order to do string
# pattern matching.
ignore_paths = [os.path.abspath(os.path.join(project_root, d))
for d in options.ignore_paths or []]
build_files = []
for dirpath, dirnames, filenames in symlink_aware_walk(project_root):
# Do not walk directories that contain generated/non-source files.
# All modifications to dirnames must occur in-place.
dirnames[:] = [d for d in dirnames if not (os.path.join(dirpath, d) in ignore_paths)]
if BUILD_RULES_FILE_NAME in filenames:
build_file = os.path.join(dirpath, BUILD_RULES_FILE_NAME)
build_files.append(build_file)
buildFileProcessor = BuildFileProcessor(project_root, options.include or [], options.server)
for build_file in build_files:
buildFileProcessor.process(build_file)
if options.server:
# Apparently for ... in sys.stdin doesn't work with Jython when a custom stdin is
# provided by the caller in Java-land; it claims that sys.stdin is a filereader which doesn't
# offer an iterator.
for build_file in iter(sys.stdin.readline, ''):
buildFileProcessor.process(build_file.rstrip())
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print >> sys.stderr, "Killed by User"
|
GerritCodeReview/buck
|
src/com/facebook/buck/parser/buck.py
|
Python
|
apache-2.0
| 24,221
|
[
"ORCA"
] |
68f4189947f9cf5b045df1c5edc9fc81202d900be45b39b0e1103cbbc546d134
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
class A(object):
def __init__(self, this, that):
self.this = this
self.that = that
def __eq__(self, b):
return b.__class__ is self.__class__ and b.this == self.this and b.that == self.that
def __neq__(self, b): return not self.__eq__(b)
def __repr__(self): return "A({0.this}, {0.that})".format(self)
@fixture
def first():
return A(0, A(5, A('a', 'b')))
@fixture
def second():
return A(1, A(6, A('c', 'd')))
@fixture
def dictionary(first, second):
from pylada.jobfolder.forwarding_dict import ForwardingDict
dictionary = ForwardingDict(ordered=True, readonly=True)
dictionary['first'] = first
dictionary['second'] = second
return dictionary
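# Per the tests below, attribute access is forwarded to every stored value, so
# dictionary.this behaves like {'first': first.this, 'second': second.this}.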
@fixture
def single_item_dict(dictionary, first):
# create dictionary with single item
dictionary.readonly = False
del dictionary.that.this
first.that.this = 5
return dictionary
def test_Aclass_fixture(first, second):
third = A(0, A(5, A('a', 'b')))
assert first == third
third.that.that.that = 'd'
assert first != third
def test_attribute_forwarding(first, second, dictionary):
assert dictionary['first'].this == 0
assert dictionary['first'].that.this == 5
assert dictionary['first'].that.that.this == 'a'
assert dictionary['first'].that.that.that == 'b'
assert dictionary['second'].this == 1
assert dictionary['second'].that.this == 6
assert dictionary['second'].that.that.this == 'c'
assert dictionary['second'].that.that.that == 'd'
def test_repr(first, second, dictionary):
assert repr(dictionary)[0] == '{'
assert repr(dictionary)[-1] == '}'
assert 'first' in repr(dictionary)
def test_iteration(first, second, dictionary):
for key, value in dictionary.items():
assert {'first': first, 'second': second}[key] == value
for key, value in dictionary.this.items():
assert {'first': first.this, 'second': second.this}[key] == value
for key, value in dictionary.that.items():
assert {'first': first.that, 'second': second.that}[key] == value
for key, value in dictionary.that.this.items():
assert {'first': first.that.this, 'second': second.that.this}[key] == value
for key, value in dictionary.that.that.items():
assert {'first': first.that.that, 'second': second.that.that}[key] == value
for key, value in dictionary.that.that.this.items():
assert {'first': first.that.that.this, 'second': second.that.that.this}[key] == value
for key, value in dictionary.that.that.that.items():
assert {'first': first.that.that.that, 'second': second.that.that.that}[key] == value
def test_fail_on_getting_missing_attribute(dictionary):
from pytest import raises
with raises(AttributeError):
dictionary.this.that
def test_fail_on_setting_missing_attribute(dictionary):
from pytest import raises
with raises(RuntimeError):
dictionary.this = 8
def test_fail_on_deleting_missing_attribute(dictionary):
from pytest import raises
with raises(RuntimeError):
del dictionary.this
def test_writing_to_dict(dictionary):
dictionary.readonly = False
assert all([u != 8 for u in dictionary.this.values()])
dictionary.this = 8
assert all([u == 8 for u in dictionary.this.values()])
assert all([u != 8 for u in dictionary.that.this.values()])
dictionary.that.this = 8
assert all([u == 8 for u in dictionary.that.this.values()])
def test_cannot_write_to_read_only(dictionary):
from pytest import raises
dictionary.readonly = True
with raises(RuntimeError):
dictionary.this = 8
def test_cannot_delete_attribute_from_readonly(dictionary):
from pytest import raises
dictionary.readonly = True
with raises(RuntimeError):
del dictionary.this
def test_deleting_attributes(first, second, dictionary):
from pytest import raises
dictionary.readonly = False
del dictionary.that.this
assert not hasattr(first.that, 'this')
assert not hasattr(second.that, 'this')
assert hasattr(first.that.that, 'this') and hasattr(first, 'this')
assert hasattr(second.that.that, 'this') and hasattr(second, 'this')
with raises(AttributeError):
dictionary.that.this
def test_naked_end_false(first, second, single_item_dict):
single_item_dict.naked_end = False
assert next(iter(single_item_dict.that.this.values())) == first.that.this
def test_naked_end_true(first, second, single_item_dict):
single_item_dict.naked_end = True
assert single_item_dict.that.this == first.that.this
def test_modify_only_existing(first, second, dictionary):
from pytest import raises
dictionary.readonly = False
dictionary.only_existing = True
with raises(AttributeError):
dictionary.that.other = True
def test_add_missing_attributes(first, second, dictionary):
dictionary.readonly = False
dictionary.only_existing = False
dictionary.that.other = True
assert getattr(first.that, 'other', False) == True
assert getattr(second.that, 'other', False) == True
def test_add_missing_nested_attributes(first, second, dictionary):
dictionary.readonly = False
dictionary.only_existing = False
del first.that.that
dictionary.that.that.other = True
assert getattr(second.that.that, 'other', False) == True
|
pylada/pylada-light
|
tests/jobfolder/test_forwardingdict.py
|
Python
|
gpl-3.0
| 6,557
|
[
"CRYSTAL",
"VASP"
] |
94de4745266d90148090607dfdd99fbfbfe3a1a73d33cc195f101027ec01aabe
|
# -------------------------------------------------------------------------
# Copyright (C) 2005-2013 Martin Strohalm <www.mmass.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Complete text of GNU GPL can be found in the file LICENSE.TXT in the
# main directory of the program.
# -------------------------------------------------------------------------
# load libs
import sys
import os
import xml.dom.minidom
# SET VERSION
# -----------
version = '5.5.0'
nightbuild = ''
# SET CONFIG FOLDER
# -----------------
# set config folder for MAC OS X
if sys.platform == 'darwin':
confdir = 'configs'
support = os.path.expanduser("~/Library/Application Support/")
userconf = os.path.join(support,'mMass')
if os.path.exists(support) and not os.path.exists(userconf):
try: os.mkdir(userconf)
except: pass
if os.path.exists(userconf):
confdir = userconf
# set config folder for Linux
elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
confdir = 'configs'
home = os.path.expanduser("~")
userconf = os.path.join(home,'.mmass')
if os.path.exists(home) and not os.path.exists(userconf):
try: os.mkdir(userconf)
except: pass
if os.path.exists(userconf):
confdir = userconf
# set config folder for Windows
else:
confdir = os.path.sep
for folder in os.path.dirname(os.path.realpath(__file__)).split(os.path.sep)[:-1]:
path = os.path.join(confdir, folder)
if os.path.isdir(path):
confdir = path
if os.path.isfile(path):
break
confdir = os.path.join(confdir, 'configs')
if not os.path.exists(confdir):
try: os.mkdir(confdir)
except: pass
if not os.path.exists(confdir):
raise IOError, "Configuration folder cannot be found!"
# INIT DEFAULT VALUES
# -------------------
internal={
'canvasXrange': None,
}
main={
'appWidth': 1050,
'appHeight': 620,
'appMaximized': 0,
'unlockGUI': 0,
'layout': 'default',
'documentsWidth': 195,
'documentsHeight': 195,
'peaklistWidth': 195,
'peaklistHeight': 195,
'mzDigits': 4,
'intDigits': 0,
'ppmDigits': 1,
'chargeDigits': 2,
'dataPrecision': 32,
'lastDir': '',
'lastSeqDir': '',
'errorUnits': 'Da',
'printQuality': 5,
'useServer': 1,
'serverPort': 65456,
'reverseScrolling': 0,
'macListCtrlGeneric': 1,
'peaklistColumns': ['mz', 'int', 'rel', 'sn', 'z', 'fwhm', 'resol'],
'cursorInfo': ['mz', 'dist', 'ppm', 'z'],
'updatesEnabled': 1,
'updatesChecked': '',
'updatesCurrent': version,
'updatesAvailable': version,
'compassMode': 'Profile',
'compassFormat': 'mzML',
'compassDeleteFile': 1,
}
recent=[]
colours=[
[16,71,185],
[50,140,0],
[241,144,0],
[76,199,197],
[143,143,21],
[38,122,255],
[38,143,73],
[237,187,0],
[120,109,255],
[179,78,0],
[128,191,189],
[137,136,68],
[200,136,18],
[197,202,61],
[123,182,255],
[69,67,138],
[24,129,131],
[131,129,131],
[69,126,198],
[189,193,123],
[127,34,0],
[76,78,76],
[31,74,145],
[15,78,75],
[79,26,81],
]
export={
'imageWidth': 750,
'imageHeight': 500,
'imageUnits': 'px',
'imageResolution': 72,
'imageFontsScale': 1,
'imageDrawingsScale': 1,
'imageFormat': 'PNG',
'peaklistColumns': ['mz','int'],
'peaklistFormat': 'ASCII',
'peaklistSeparator': 'tab',
'spectrumSeparator': 'tab',
}
spectrum={
'xLabel': 'm/z',
'yLabel': 'a.i.',
'showGrid': 1,
'showMinorTicks': 1,
'showLegend': 1,
'showPosBars': 1,
'showGel': 1,
'showGelLegend': 1,
'showTracker': 1,
'showNotations': 1,
'showLabels': 1,
'showAllLabels': 1,
'showTicks': 1,
'showDataPoints': 1,
'showCursorImage': 1,
'posBarSize': 7,
'gelHeight': 19,
'autoscale': 1,
'normalize': 0,
'overlapLabels': 0,
'checkLimits': 1,
'labelAngle': 90,
'labelCharge': 1,
'labelGroup': 0,
'labelBgr': 1,
'labelFontSize': 10,
'axisFontSize': 10,
'tickColour': [255,75,75],
'tmpSpectrumColour': [255,0,0],
'notationMarksColour': [0,255,0],
'notationMaxLength': 40,
'notationMarks': 1,
'notationLabels': 0,
'notationMZ': 0,
}
match={
'tolerance': 0.2,
'units': 'Da',
'ignoreCharge': 0,
'filterAnnotations': 0,
'filterMatches': 0,
'filterUnselected': 0,
'filterIsotopes': 1,
'filterUnknown': 0,
}
processing={
'math':{
'operation': 'normalize',
'multiplier': 1,
},
'crop':{
'lowMass': 500,
'highMass': 5000,
},
'baseline':{
'precision': 15,
'offset': 0.25,
},
'smoothing':{
'method': 'SG',
'windowSize': 0.3,
'cycles': 2,
},
'peakpicking':{
'snThreshold': 3.0,
'absIntThreshold': 0,
'relIntThreshold': 0.0,
'pickingHeight': 0.75,
'baseline': 1,
'smoothing': 1,
'deisotoping': 1,
'monoisotopic': 0,
'removeShoulders': 0,
},
'deisotoping':{
'maxCharge': 1,
'massTolerance': 0.1,
'intTolerance': 0.5,
'isotopeShift': 0.0,
'removeIsotopes': 1,
'removeUnknown': 1,
'labelEnvelope': '1st',
'envelopeIntensity': 'maximum',
'setAsMonoisotopic': 0,
},
'deconvolution':{
'massType': 0,
'groupWindow': 0.01,
'groupPeaks': 1,
'forceGroupWindow': 0,
},
'batch':{
'swap': 0,
'math': 0,
'crop': 0,
'baseline': 0,
'smoothing': 0,
'peakpicking': 0,
'deisotoping': 0,
'deconvolution': 0,
},
}
calibration={
'fitting': 'quadratic',
'tolerance': 50,
'units': 'ppm',
'statCutOff': 800,
}
sequence={
'editor':{
'gridSize': 20,
},
'digest':{
'maxMods': 1,
'maxCharge': 1,
'massType': 0,
'enzyme': 'Trypsin',
'miscl': 1,
'lowMass': 500,
'highMass': 5000,
'retainPos': 0,
'allowMods': 0,
'listTemplateAmino': 'b.S.a [m]',
'listTemplateCustom': 'b . [ S ] . a [m]',
'matchTemplateAmino': 'h b.S.a [m]',
'matchTemplateCustom': ' h b . [ S ] . a [m]',
},
'fragment':{
'maxMods': 1,
'maxCharge': 1,
'massType': 1,
'fragments': ['a','b','y','-NH3','-H2O'],
'maxLosses': 2,
'filterFragments': 1,
'listTemplateAmino': 'b.S.a [m]',
'listTemplateCustom': 'b . [ S ] . a [m]',
'matchTemplateAmino': 'f h [m]',
'matchTemplateCustom': 'f h [m]',
},
'search':{
'mass': 0,
'maxMods': 1,
'charge': 1,
'massType': 0,
'enzyme': 'Trypsin',
'semiSpecific': True,
'tolerance': 0.2,
'units': 'Da',
'retainPos': 0,
'listTemplateAmino': 'b.S.a [m]',
'listTemplateCustom': 'b . [ S ] . a [m]',
},
}
massCalculator={
'ionseriesAgent': 'H',
'ionseriesAgentCharge': 1,
'ionseriesPolarity': 1,
'patternFwhm': 0.1,
'patternIntensity': 100,
'patternBaseline': 0,
'patternShift': 0,
'patternThreshold': 0.001,
'patternShowPeaks': 1,
'patternPeakShape': 'gaussian',
}
massfilter={}
massToFormula={
'countLimit': 1000,
'massLimit': 3000,
'charge': 1,
'ionization': 'H',
'tolerance': 1.,
'units': 'ppm',
'formulaMin': '',
'formulaMax': '',
'autoCHNO': 1,
'checkPattern': 1,
'rules': ['HC','NOPSC','NOPS','RDBE', 'RDBEInt'],
'HCMin': 0.1,
'HCMax': 3,
'NCMax': 4,
'OCMax': 3,
'PCMax': 2,
'SCMax': 3,
'RDBEMin': -1,
'RDBEMax': 40,
'PubChemScript':'http://pubchem.ncbi.nlm.nih.gov/search/search.cgi',
'ChemSpiderScript': 'http://www.chemspider.com/Search.aspx',
'METLINScript': 'http://metlin.scripps.edu/metabo_list_adv.php',
'HMDBScript': 'http://www.hmdb.ca/search',
'LipidMAPSScript': 'http://www.lipidmaps.org/data/structure/LMSDSearch.php',
}
massDefectPlot={
'xAxis': 'mz',
'yAxis': 'standard',
'nominalMass': 'floor',
'kendrickFormula': 'CH2',
'relIntCutoff': 0.0,
'removeIsotopes': 0,
'ignoreCharge': 1,
'showNotations': 0,
'showAllDocuments': 0,
}
compoundsSearch={
'massType': 0,
'maxCharge': 1,
'radicals': 0,
'adducts': ['Na','K'],
}
peakDifferences={
'aminoacids': 1,
'dipeptides': 0,
'massType': 0,
'tolerance': 0.1,
'consolidate': 0,
}
comparePeaklists={
'compare': 'peaklists',
'tolerance': 0.2,
'units': 'Da',
'ignoreCharge': 0,
'ratioCheck': 0,
'ratioDirection': 1,
'ratioThreshold': 2,
}
spectrumGenerator={
'fwhm': 0.1,
'points': 10,
'noise': 0,
'forceFwhm': 0,
'peakShape': 'gaussian',
'showPeaks': 1,
'showOverlay': 0,
'showFlipped': 0,
}
envelopeFit={
'loss': 'H',
'gain': 'H{2}',
'fit': 'spectrum',
'scaleMin': 0,
'scaleMax': 10,
'charge': 1,
'fwhm': 0.01,
'forceFwhm': 0,
'peakShape': 'gaussian',
'autoAlign': 1,
'relThreshold': 0.05,
}
mascot={
'common':{
'title':'',
'userName':'',
'userEmail':'',
'server': 'Matrix Science',
'searchType': 'pmf',
'filterAnnotations': 0,
'filterMatches': 0,
'filterUnselected': 0,
'filterIsotopes': 1,
'filterUnknown': 0,
},
'pmf':{
'database': 'SwissProt',
'taxonomy': 'All entries',
'enzyme': 'Trypsin',
'miscleavages': 1,
'fixedMods': [],
'variableMods': [],
'hiddenMods': 0,
'proteinMass': '',
'peptideTol': 0.1,
'peptideTolUnits': 'Da',
'massType': 'Monoisotopic',
'charge': '1+',
'decoy': 0,
'report': 'AUTO',
},
'sq':{
'database': 'SwissProt',
'taxonomy': 'All entries',
'enzyme': 'Trypsin',
'miscleavages': 1,
'fixedMods': [],
'variableMods': [],
'hiddenMods': 0,
'peptideTol': 0.1,
'peptideTolUnits': 'Da',
'msmsTol': 0.2,
'msmsTolUnits': 'Da',
'massType': 'Average',
'charge': '1+',
'instrument': 'Default',
'quantitation': 'None',
'decoy': 0,
'report': 'AUTO',
},
'mis':{
'database': 'SwissProt',
'taxonomy': 'All entries',
'enzyme': 'Trypsin',
'miscleavages': 1,
'fixedMods': [],
'variableMods': [],
'hiddenMods': 0,
'peptideMass': '',
'peptideTol': 0.1,
'peptideTolUnits': 'Da',
'msmsTol': 0.2,
'msmsTolUnits': 'Da',
'massType': 'Average',
'charge': '1+',
'instrument': 'Default',
'quantitation': 'None',
'decoy': 0,
'errorTolerant': 0,
'report': 'AUTO',
},
}
profound={
'script': 'http://prowl.rockefeller.edu/prowl-cgi/profound.exe',
'title':'',
'database': 'NCBI nr',
'taxonomy': 'All taxa',
'enzyme': 'Trypsin',
'miscleavages': 1,
'fixedMods': [],
'variableMods': [],
'proteinMassLow': 0,
'proteinMassHigh': 300,
'proteinPILow': 0,
'proteinPIHigh': 14,
'peptideTol': 0.1,
'peptideTolUnits': 'Da',
'massType': 'Monoisotopic',
'charge': 'MH+',
'ranking': 'expect',
'expectation': 1,
'candidates': 10,
'filterAnnotations': 0,
'filterMatches': 0,
'filterUnselected': 0,
'filterIsotopes': 1,
'filterUnknown': 0,
}
prospector={
'common':{
'title':'',
'script': 'http://prospector.ucsf.edu/prospector/cgi-bin/mssearch.cgi',
'searchType': 'msfit',
'filterAnnotations': 0,
'filterMatches': 0,
'filterUnselected': 0,
'filterIsotopes': 1,
'filterUnknown': 0,
},
'msfit':{
'database': 'SwissProt',
'taxonomy': 'All',
'enzyme': 'Trypsin',
'miscleavages': 1,
'fixedMods': [],
'variableMods': [],
'proteinMassLow': 0,
'proteinMassHigh': 300,
'proteinPILow': 0,
'proteinPIHigh': 14,
'peptideTol': 0.1,
'peptideTolUnits': 'Da',
'massType': 'Monoisotopic',
'instrument': 'MALDI-TOFTOF',
'minMatches': 4,
'maxMods': 1,
'report': 5,
'pfactor': 0.4,
},
'mstag':{
'database': 'SwissProt',
'taxonomy': 'All',
'enzyme': 'Trypsin',
'miscleavages': 1,
'fixedMods': [],
'variableMods': [],
'peptideMass': '',
'peptideTol': 0.1,
'peptideTolUnits': 'Da',
'peptideCharge': '1',
'msmsTol': 0.2,
'msmsTolUnits': 'Da',
'massType': 'Monoisotopic',
'instrument': 'MALDI-TOFTOF',
'maxMods': 1,
'report': 5,
},
}
links={
'mMassHomepage': 'http://www.mmass.org/',
'mMassForum': 'http://forum.mmass.org/',
'mMassTwitter': 'http://www.twitter.com/mmassorg/',
'mMassCite': 'http://www.mmass.org/donate/papers.php',
'mMassDonate': 'http://www.mmass.org/donate/',
'mMassDownload': 'http://www.mmass.org/download/',
'mMassWhatsNew': 'http://www.mmass.org/download/history.php',
'biomedmstools': 'http://ms.biomed.cas.cz/MSTools/',
'blast': 'http://www.ebi.ac.uk/Tools/blastall/',
'clustalw': 'http://www.ebi.ac.uk/Tools/clustalw/',
'deltamass': 'http://www.abrf.org/index.cfm/dm.home',
'emblebi': 'http://www.ebi.ac.uk/services/',
'expasy': 'http://www.expasy.org/',
'fasta': 'http://www.ebi.ac.uk/Tools/fasta33/',
'matrixscience': 'http://www.matrixscience.com/',
'muscle': 'http://phylogenomics.berkeley.edu/cgi-bin/muscle/input_muscle.py',
'ncbi': 'http://www.ncbi.nlm.nih.gov/Entrez/',
'pdb': 'http://www.rcsb.org/pdb/',
'pir': 'http://pir.georgetown.edu/',
'profound': 'http://prowl.rockefeller.edu/prowl-cgi/profound.exe',
'prospector': 'http://prospector.ucsf.edu/',
'unimod': 'http://www.unimod.org/',
'uniprot': 'http://www.uniprot.org/',
}
replacements={
'sequences':{
'general':{
'pattern': '^([A-Z0-9_]+[\.0-9]*)$',
'url': 'http://www.ncbi.nlm.nih.gov/protein/%s',
},
'gi':{
'pattern': '^gi\|?([0-9]+[\.0-9]*)$',
'url': 'http://www.ncbi.nlm.nih.gov/protein/%s',
},
'gb':{
'pattern': '^gb\|?([A-Z]{3}[0-9]{5}[\.0-9]*)$',
'url': 'http://www.ncbi.nlm.nih.gov/protein/%s',
},
'sp':{
'pattern': '^sp\|?([A-Z][A-Z0-9]+)$',
'url': 'http://www.uniprot.org/uniprot/%s',
},
'ref':{
'pattern': '^ref\|?([A-Z]{2}_[0-9]+[\.0-9]*)$',
'url': 'http://www.ncbi.nlm.nih.gov/protein/%s',
},
},
'compounds':{
'PubChemC':{
'pattern': 'CID([0-9]{1,10})',
'url': 'http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi?cid=%s',
},
'LipidMaps':{
'pattern': '(LM[A-Z]{2}[0-9]{4}[0-9A-Z]{2}[0-9]{2})',
'url': 'http://www.lipidmaps.org/data/LMSDRecord.php?LMID=%s',
},
'NORINE':{
'pattern': '(NOR[0-9]{5})',
'url': 'http://bioinfo.lifl.fr/norine/result.jsp?ID=%s',
},
},
}
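# Note added for illustration (assumption about how these entries are used
# elsewhere in mMass): each entry above pairs a regular expression with a URL
# template. For example the 'gi' pattern captures the numeric part of an
# accession such as "gi|12345", and "url % match.group(1)" then yields
# http://www.ncbi.nlm.nih.gov/protein/12345.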
# LOAD AND SAVE CONFIG FILE
# -------------------------
def loadConfig(path=os.path.join(confdir, 'config.xml')):
"""Parse config XML and get data."""
# parse XML
document = xml.dom.minidom.parse(path)
# main
mainTags = document.getElementsByTagName('main')
if mainTags:
_getParams(mainTags[0], main)
if type(main['cursorInfo']) != list:
main['cursorInfo'] = main['cursorInfo'].split(';')
if type(main['peaklistColumns']) != list:
main['peaklistColumns'] = main['peaklistColumns'].split(';')
# recent files
recentTags = document.getElementsByTagName('recent')
if recentTags:
pathTags = recentTags[0].getElementsByTagName('path')
if pathTags:
del recent[:]
for pathTag in pathTags:
recent.append(pathTag.getAttribute('value'))
# colours
coloursTags = document.getElementsByTagName('colours')
if coloursTags:
colourTags = coloursTags[0].getElementsByTagName('colour')
if colourTags:
del colours[:]
for colourTag in colourTags:
col = colourTag.getAttribute('value')
colours.append([int(c, 16) for c in (col[0:2], col[2:4], col[4:6])])
# export
exportTags = document.getElementsByTagName('export')
if exportTags:
_getParams(exportTags[0], export)
if type(export['peaklistColumns']) != list:
export['peaklistColumns'] = export['peaklistColumns'].split(';')
# spectrum
spectrumTags = document.getElementsByTagName('spectrum')
if spectrumTags:
_getParams(spectrumTags[0], spectrum)
if type(spectrum['tickColour']) != list:
col = spectrum['tickColour']
spectrum['tickColour'] = [int(c, 16) for c in (col[0:2], col[2:4], col[4:6])]
if type(spectrum['tmpSpectrumColour']) != list:
col = spectrum['tmpSpectrumColour']
spectrum['tmpSpectrumColour'] = [int(c, 16) for c in (col[0:2], col[2:4], col[4:6])]
if type(spectrum['notationMarksColour']) != list:
col = spectrum['notationMarksColour']
spectrum['notationMarksColour'] = [int(c, 16) for c in (col[0:2], col[2:4], col[4:6])]
# match
matchTags = document.getElementsByTagName('match')
if matchTags:
_getParams(matchTags[0], match)
# processing
processingTags = document.getElementsByTagName('processing')
if processingTags:
cropTags = processingTags[0].getElementsByTagName('crop')
if cropTags:
_getParams(cropTags[0], processing['crop'])
baselineTags = processingTags[0].getElementsByTagName('baseline')
if baselineTags:
_getParams(baselineTags[0], processing['baseline'])
smoothingTags = processingTags[0].getElementsByTagName('smoothing')
if smoothingTags:
_getParams(smoothingTags[0], processing['smoothing'])
peakpickingTags = processingTags[0].getElementsByTagName('peakpicking')
if peakpickingTags:
_getParams(peakpickingTags[0], processing['peakpicking'])
deisotopingTags = processingTags[0].getElementsByTagName('deisotoping')
if deisotopingTags:
_getParams(deisotopingTags[0], processing['deisotoping'])
deconvolutionTags = processingTags[0].getElementsByTagName('deconvolution')
if deconvolutionTags:
_getParams(deconvolutionTags[0], processing['deconvolution'])
# calibration
calibrationTags = document.getElementsByTagName('calibration')
if calibrationTags:
_getParams(calibrationTags[0], calibration)
# sequence
sequenceTags = document.getElementsByTagName('sequence')
if sequenceTags:
editorTags = sequenceTags[0].getElementsByTagName('editor')
if editorTags:
_getParams(editorTags[0], sequence['editor'])
digestTags = sequenceTags[0].getElementsByTagName('digest')
if digestTags:
_getParams(digestTags[0], sequence['digest'])
fragmentTags = sequenceTags[0].getElementsByTagName('fragment')
if fragmentTags:
_getParams(fragmentTags[0], sequence['fragment'])
searchTags = sequenceTags[0].getElementsByTagName('search')
if searchTags:
_getParams(searchTags[0], sequence['search'])
if type(sequence['fragment']['fragments']) != list:
sequence['fragment']['fragments'] = sequence['fragment']['fragments'].split(';')
# mass calculator
massCalculatorTags = document.getElementsByTagName('massCalculator')
if massCalculatorTags:
_getParams(massCalculatorTags[0], massCalculator)
# mass to formula
massToFormulaTags = document.getElementsByTagName('massToFormula')
if massToFormulaTags:
_getParams(massToFormulaTags[0], massToFormula)
if type(massToFormula['rules']) != list:
massToFormula['rules'] = massToFormula['rules'].split(';')
# mass defect plot
massDefectPlotTags = document.getElementsByTagName('massDefectPlot')
if massDefectPlotTags:
_getParams(massDefectPlotTags[0], massDefectPlot)
# compounds search
compoundsSearchTags = document.getElementsByTagName('compoundsSearch')
if compoundsSearchTags:
_getParams(compoundsSearchTags[0], compoundsSearch)
if type(compoundsSearch['adducts']) != list:
compoundsSearch['adducts'] = compoundsSearch['adducts'].split(';')
# peak differences
peakDifferencesTags = document.getElementsByTagName('peakDifferences')
if peakDifferencesTags:
_getParams(peakDifferencesTags[0], peakDifferences)
# compare peaklists
comparePeaklistsTags = document.getElementsByTagName('comparePeaklists')
if comparePeaklistsTags:
_getParams(comparePeaklistsTags[0], comparePeaklists)
# spectrum generator
spectrumGeneratorTags = document.getElementsByTagName('spectrumGenerator')
if spectrumGeneratorTags:
_getParams(spectrumGeneratorTags[0], spectrumGenerator)
# envelope fit
envelopeFitTags = document.getElementsByTagName('envelopeFit')
if envelopeFitTags:
_getParams(envelopeFitTags[0], envelopeFit)
# mascot
mascotTags = document.getElementsByTagName('mascot')
if mascotTags:
commonTags = mascotTags[0].getElementsByTagName('common')
if commonTags:
_getParams(commonTags[0], mascot['common'])
pmfTags = mascotTags[0].getElementsByTagName('pmf')
if pmfTags:
_getParams(pmfTags[0], mascot['pmf'])
sqTags = mascotTags[0].getElementsByTagName('sq')
if sqTags:
_getParams(sqTags[0], mascot['sq'])
misTags = mascotTags[0].getElementsByTagName('mis')
if misTags:
_getParams(misTags[0], mascot['mis'])
for key in ('pmf', 'sq', 'mis'):
if type(mascot[key]['fixedMods']) != list:
mascot[key]['fixedMods'] = mascot[key]['fixedMods'].split(';')
if type(mascot[key]['variableMods']) != list:
mascot[key]['variableMods'] = mascot[key]['variableMods'].split(';')
# profound
profoundTags = document.getElementsByTagName('profound')
if profoundTags:
_getParams(profoundTags[0], profound)
if type(profound['fixedMods']) != list:
profound['fixedMods'] = profound['fixedMods'].split(';')
if type(profound['variableMods']) != list:
profound['variableMods'] = profound['variableMods'].split(';')
# prospector
prospectorTags = document.getElementsByTagName('prospector')
if prospectorTags:
commonTags = prospectorTags[0].getElementsByTagName('common')
if commonTags:
_getParams(commonTags[0], prospector['common'])
msfitTags = prospectorTags[0].getElementsByTagName('msfit')
if msfitTags:
_getParams(msfitTags[0], prospector['msfit'])
mstagTags = prospectorTags[0].getElementsByTagName('mstag')
if mstagTags:
_getParams(mstagTags[0], prospector['mstag'])
for key in ('msfit', 'mstag'):
if type(prospector[key]['fixedMods']) != list:
prospector[key]['fixedMods'] = prospector[key]['fixedMods'].split(';')
if type(prospector[key]['variableMods']) != list:
prospector[key]['variableMods'] = prospector[key]['variableMods'].split(';')
# links
linksTags = document.getElementsByTagName('links')
if linksTags:
linkTags = linksTags[0].getElementsByTagName('link')
for linkTag in linkTags:
name = linkTag.getAttribute('name')
value = linkTag.getAttribute('value')
if name not in ('mMassHomepage', 'mMassForum', 'mMassTwitter', 'mMassCite', 'mMassDonate', 'mMassDownload'):
links[name] = value
# ----
def saveConfig(path=os.path.join(confdir, 'config.xml')):
"""Make and save config XML."""
buff = '<?xml version="1.0" encoding="utf-8" ?>\n'
buff += '<mMassConfig version="1.0">\n\n'
# main
buff += ' <main>\n'
buff += ' <param name="appWidth" value="%d" type="int" />\n' % (main['appWidth'])
buff += ' <param name="appHeight" value="%d" type="int" />\n' % (main['appHeight'])
buff += ' <param name="appMaximized" value="%d" type="int" />\n' % (bool(main['appMaximized']))
buff += ' <param name="layout" value="%s" type="str" />\n' % (_escape(main['layout']))
buff += ' <param name="documentsWidth" value="%d" type="int" />\n' % (main['documentsWidth'])
buff += ' <param name="documentsHeight" value="%d" type="int" />\n' % (main['documentsHeight'])
buff += ' <param name="peaklistWidth" value="%d" type="int" />\n' % (main['peaklistWidth'])
buff += ' <param name="peaklistHeight" value="%d" type="int" />\n' % (main['peaklistHeight'])
buff += ' <param name="reverseScrolling" value="%d" type="int" />\n' % (bool(main['reverseScrolling']))
buff += ' <param name="macListCtrlGeneric" value="%d" type="int" />\n' % (bool(main['macListCtrlGeneric']))
buff += ' <param name="cursorInfo" value="%s" type="str" />\n' % (';'.join(main['cursorInfo']))
buff += ' <param name="peaklistColumns" value="%s" type="str" />\n' % (';'.join(main['peaklistColumns']))
buff += ' <param name="mzDigits" value="%d" type="int" />\n' % (main['mzDigits'])
buff += ' <param name="intDigits" value="%d" type="int" />\n' % (main['intDigits'])
buff += ' <param name="ppmDigits" value="%d" type="int" />\n' % (main['ppmDigits'])
buff += ' <param name="chargeDigits" value="%d" type="int" />\n' % (main['chargeDigits'])
buff += ' <param name="lastDir" value="%s" type="unicode" />\n' % (_escape(main['lastDir']))
buff += ' <param name="lastSeqDir" value="%s" type="unicode" />\n' % (_escape(main['lastSeqDir']))
buff += ' <param name="errorUnits" value="%s" type="str" />\n' % (main['errorUnits'])
buff += ' <param name="printQuality" value="%d" type="int" />\n' % (main['printQuality'])
buff += ' <param name="useServer" value="%d" type="int" />\n' % (bool(main['useServer']))
buff += ' <param name="serverPort" value="%d" type="int" />\n' % (main['serverPort'])
buff += ' <param name="updatesEnabled" value="%d" type="int" />\n' % (bool(main['updatesEnabled']))
buff += ' <param name="updatesChecked" value="%s" type="str" />\n' % (main['updatesChecked'])
buff += ' <param name="updatesCurrent" value="%s" type="str" />\n' % (main['updatesCurrent'])
buff += ' <param name="updatesAvailable" value="%s" type="str" />\n' % (main['updatesAvailable'])
buff += ' <param name="compassMode" value="%s" type="str" />\n' % (main['compassMode'])
buff += ' <param name="compassFormat" value="%s" type="str" />\n' % (main['compassFormat'])
buff += ' <param name="compassDeleteFile" value="%d" type="int" />\n' % (bool(main['compassDeleteFile']))
buff += ' </main>\n\n'
# recent files
buff += ' <recent>\n'
for item in recent:
buff += ' <path value="%s" />\n' % (_escape(item))
buff += ' </recent>\n\n'
# colours
buff += ' <colours>\n'
for item in colours:
buff += ' <colour value="%02x%02x%02x" />\n' % tuple(item)
buff += ' </colours>\n\n'
# export
buff += ' <export>\n'
buff += ' <param name="imageWidth" value="%.1f" type="float" />\n' % (export['imageWidth'])
buff += ' <param name="imageHeight" value="%.1f" type="float" />\n' % (export['imageHeight'])
buff += ' <param name="imageUnits" value="%s" type="str" />\n' % (export['imageUnits'])
buff += ' <param name="imageResolution" value="%d" type="int" />\n' % (export['imageResolution'])
buff += ' <param name="imageFontsScale" value="%d" type="int" />\n' % (export['imageFontsScale'])
buff += ' <param name="imageDrawingsScale" value="%d" type="int" />\n' % (export['imageDrawingsScale'])
buff += ' <param name="imageFormat" value="%s" type="str" />\n' % (export['imageFormat'])
buff += ' <param name="peaklistColumns" value="%s" type="str" />\n' % (';'.join(export['peaklistColumns']))
buff += ' <param name="peaklistFormat" value="%s" type="str" />\n' % (export['peaklistFormat'])
buff += ' <param name="peaklistSeparator" value="%s" type="str" />\n' % (export['peaklistSeparator'])
buff += ' <param name="spectrumSeparator" value="%s" type="str" />\n' % (export['spectrumSeparator'])
buff += ' </export>\n\n'
# spectrum
buff += ' <spectrum>\n'
buff += ' <param name="xLabel" value="%s" type="unicode" />\n' % (_escape(spectrum['xLabel']))
buff += ' <param name="yLabel" value="%s" type="unicode" />\n' % (_escape(spectrum['yLabel']))
buff += ' <param name="showGrid" value="%d" type="int" />\n' % (bool(spectrum['showGrid']))
buff += ' <param name="showMinorTicks" value="%d" type="int" />\n' % (bool(spectrum['showMinorTicks']))
buff += ' <param name="showLegend" value="%d" type="int" />\n' % (bool(spectrum['showLegend']))
buff += ' <param name="showPosBars" value="%d" type="int" />\n' % (bool(spectrum['showPosBars']))
buff += ' <param name="showGel" value="%d" type="int" />\n' % (bool(spectrum['showGel']))
buff += ' <param name="showGelLegend" value="%d" type="int" />\n' % (bool(spectrum['showGelLegend']))
buff += ' <param name="showTracker" value="%d" type="int" />\n' % (bool(spectrum['showTracker']))
buff += ' <param name="showNotations" value="%d" type="int" />\n' % (bool(spectrum['showNotations']))
buff += ' <param name="showDataPoints" value="%d" type="int" />\n' % (bool(spectrum['showDataPoints']))
buff += ' <param name="showLabels" value="%d" type="int" />\n' % (bool(spectrum['showLabels']))
buff += ' <param name="showAllLabels" value="%d" type="int" />\n' % (bool(spectrum['showAllLabels']))
buff += ' <param name="showTicks" value="%d" type="int" />\n' % (bool(spectrum['showTicks']))
buff += ' <param name="showCursorImage" value="%d" type="int" />\n' % (bool(spectrum['showCursorImage']))
buff += ' <param name="posBarSize" value="%d" type="int" />\n' % (spectrum['posBarSize'])
buff += ' <param name="gelHeight" value="%d" type="int" />\n' % (spectrum['gelHeight'])
buff += ' <param name="autoscale" value="%d" type="int" />\n' % (bool(spectrum['autoscale']))
buff += ' <param name="overlapLabels" value="%d" type="int" />\n' % (bool(spectrum['overlapLabels']))
buff += ' <param name="checkLimits" value="%d" type="int" />\n' % (bool(spectrum['checkLimits']))
buff += ' <param name="labelAngle" value="%d" type="int" />\n' % (spectrum['labelAngle'])
buff += ' <param name="labelCharge" value="%d" type="int" />\n' % (bool(spectrum['labelCharge']))
buff += ' <param name="labelGroup" value="%d" type="int" />\n' % (bool(spectrum['labelGroup']))
buff += ' <param name="labelBgr" value="%d" type="int" />\n' % (bool(spectrum['labelBgr']))
buff += ' <param name="labelFontSize" value="%d" type="int" />\n' % (spectrum['labelFontSize'])
buff += ' <param name="axisFontSize" value="%d" type="int" />\n' % (spectrum['axisFontSize'])
buff += ' <param name="tickColour" value="%02x%02x%02x" type="str" />\n' % tuple(spectrum['tickColour'])
buff += ' <param name="tmpSpectrumColour" value="%02x%02x%02x" type="str" />\n' % tuple(spectrum['tmpSpectrumColour'])
buff += ' <param name="notationMarksColour" value="%02x%02x%02x" type="str" />\n' % tuple(spectrum['notationMarksColour'])
buff += ' <param name="notationMaxLength" value="%d" type="int" />\n' % (spectrum['notationMaxLength'])
buff += ' <param name="notationMarks" value="%d" type="int" />\n' % (bool(spectrum['notationMarks']))
buff += ' <param name="notationLabels" value="%d" type="int" />\n' % (bool(spectrum['notationLabels']))
buff += ' <param name="notationMZ" value="%d" type="int" />\n' % (bool(spectrum['notationMZ']))
buff += ' </spectrum>\n\n'
# match
buff += ' <match>\n'
buff += ' <param name="tolerance" value="%f" type="float" />\n' % (match['tolerance'])
buff += ' <param name="units" value="%s" type="str" />\n' % (match['units'])
buff += ' <param name="ignoreCharge" value="%d" type="int" />\n' % (bool(match['ignoreCharge']))
buff += ' <param name="filterAnnotations" value="%d" type="int" />\n' % (bool(match['filterAnnotations']))
buff += ' <param name="filterMatches" value="%d" type="int" />\n' % (bool(match['filterMatches']))
buff += ' <param name="filterUnselected" value="%d" type="int" />\n' % (bool(match['filterUnselected']))
buff += ' <param name="filterIsotopes" value="%d" type="int" />\n' % (bool(match['filterIsotopes']))
buff += ' <param name="filterUnknown" value="%d" type="int" />\n' % (bool(match['filterUnknown']))
buff += ' </match>\n\n'
# processing
buff += ' <processing>\n'
buff += ' <crop>\n'
buff += ' <param name="lowMass" value="%d" type="int" />\n' % (processing['crop']['lowMass'])
buff += ' <param name="highMass" value="%d" type="int" />\n' % (processing['crop']['highMass'])
buff += ' </crop>\n'
buff += ' <baseline>\n'
buff += ' <param name="precision" value="%d" type="int" />\n' % (processing['baseline']['precision'])
buff += ' <param name="offset" value="%f" type="float" />\n' % (processing['baseline']['offset'])
buff += ' </baseline>\n'
buff += ' <smoothing>\n'
buff += ' <param name="method" value="%s" type="str" />\n' % (processing['smoothing']['method'])
buff += ' <param name="windowSize" value="%f" type="float" />\n' % (processing['smoothing']['windowSize'])
buff += ' <param name="cycles" value="%d" type="int" />\n' % (processing['smoothing']['cycles'])
buff += ' </smoothing>\n'
buff += ' <peakpicking>\n'
buff += ' <param name="snThreshold" value="%f" type="float" />\n' % (processing['peakpicking']['snThreshold'])
buff += ' <param name="absIntThreshold" value="%f" type="float" />\n' % (processing['peakpicking']['absIntThreshold'])
buff += ' <param name="relIntThreshold" value="%f" type="float" />\n' % (processing['peakpicking']['relIntThreshold'])
buff += ' <param name="pickingHeight" value="%f" type="float" />\n' % (processing['peakpicking']['pickingHeight'])
buff += ' <param name="baseline" value="%d" type="int" />\n' % (bool(processing['peakpicking']['baseline']))
buff += ' <param name="smoothing" value="%d" type="int" />\n' % (bool(processing['peakpicking']['smoothing']))
buff += ' <param name="deisotoping" value="%d" type="int" />\n' % (bool(processing['peakpicking']['deisotoping']))
buff += ' <param name="removeShoulders" value="%d" type="int" />\n' % (bool(processing['peakpicking']['removeShoulders']))
buff += ' </peakpicking>\n'
buff += ' <deisotoping>\n'
buff += ' <param name="maxCharge" value="%d" type="int" />\n' % (processing['deisotoping']['maxCharge'])
buff += ' <param name="massTolerance" value="%f" type="float" />\n' % (processing['deisotoping']['massTolerance'])
buff += ' <param name="intTolerance" value="%f" type="float" />\n' % (processing['deisotoping']['intTolerance'])
buff += ' <param name="removeIsotopes" value="%d" type="int" />\n' % (bool(processing['deisotoping']['removeIsotopes']))
buff += ' <param name="removeUnknown" value="%d" type="int" />\n' % (bool(processing['deisotoping']['removeUnknown']))
buff += ' <param name="labelEnvelope" value="%s" type="str" />\n' % (processing['deisotoping']['labelEnvelope'])
buff += ' <param name="envelopeIntensity" value="%s" type="str" />\n' % (processing['deisotoping']['envelopeIntensity'])
buff += ' <param name="setAsMonoisotopic" value="%d" type="int" />\n' % (bool(processing['deisotoping']['setAsMonoisotopic']))
buff += ' </deisotoping>\n'
buff += ' <deconvolution>\n'
buff += ' <param name="massType" value="%d" type="int" />\n' % (processing['deconvolution']['massType'])
buff += ' <param name="groupWindow" value="%f" type="float" />\n' % (processing['deconvolution']['groupWindow'])
buff += ' <param name="groupPeaks" value="%d" type="int" />\n' % (bool(processing['deconvolution']['groupPeaks']))
buff += ' <param name="forceGroupWindow" value="%d" type="int" />\n' % (bool(processing['deconvolution']['forceGroupWindow']))
buff += ' </deconvolution>\n'
buff += ' <batch>\n'
buff += ' <param name="math" value="%d" type="int" />\n' % (bool(processing['batch']['math']))
buff += ' <param name="crop" value="%d" type="int" />\n' % (bool(processing['batch']['crop']))
buff += ' <param name="baseline" value="%d" type="int" />\n' % (bool(processing['batch']['baseline']))
buff += ' <param name="smoothing" value="%d" type="int" />\n' % (bool(processing['batch']['smoothing']))
buff += ' <param name="peakpicking" value="%d" type="int" />\n' % (bool(processing['batch']['peakpicking']))
buff += ' <param name="deisotoping" value="%d" type="int" />\n' % (bool(processing['batch']['deisotoping']))
buff += ' <param name="deconvolution" value="%d" type="int" />\n' % (bool(processing['batch']['deconvolution']))
buff += ' </batch>\n'
buff += ' </processing>\n\n'
# calibration
buff += ' <calibration>\n'
buff += ' <param name="fitting" value="%s" type="str" />\n' % (calibration['fitting'])
buff += ' <param name="tolerance" value="%f" type="float" />\n' % (calibration['tolerance'])
buff += ' <param name="units" value="%s" type="str" />\n' % (calibration['units'])
buff += ' <param name="statCutOff" value="%d" type="int" />\n' % (calibration['statCutOff'])
buff += ' </calibration>\n\n'
# sequence
buff += ' <sequence>\n'
buff += ' <editor>\n'
buff += ' <param name="gridSize" value="%d" type="int" />\n' % (sequence['editor']['gridSize'])
buff += ' </editor>\n'
buff += ' <digest>\n'
buff += ' <param name="maxMods" value="%d" type="int" />\n' % (sequence['digest']['maxMods'])
buff += ' <param name="maxCharge" value="%d" type="int" />\n' % (sequence['digest']['maxCharge'])
buff += ' <param name="massType" value="%d" type="int" />\n' % (sequence['digest']['massType'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (_escape(sequence['digest']['enzyme']))
buff += ' <param name="miscl" value="%d" type="int" />\n' % (sequence['digest']['miscl'])
buff += ' <param name="lowMass" value="%d" type="int" />\n' % (sequence['digest']['lowMass'])
buff += ' <param name="highMass" value="%d" type="int" />\n' % (sequence['digest']['highMass'])
buff += ' <param name="retainPos" value="%d" type="int" />\n' % (bool(sequence['digest']['retainPos']))
buff += ' <param name="allowMods" value="%d" type="int" />\n' % (bool(sequence['digest']['allowMods']))
buff += ' </digest>\n'
buff += ' <fragment>\n'
buff += ' <param name="maxMods" value="%d" type="int" />\n' % (sequence['fragment']['maxMods'])
buff += ' <param name="maxCharge" value="%d" type="int" />\n' % (sequence['fragment']['maxCharge'])
buff += ' <param name="massType" value="%d" type="int" />\n' % (sequence['fragment']['massType'])
buff += ' <param name="fragments" value="%s" type="str" />\n' % (';'.join(sequence['fragment']['fragments']))
buff += ' <param name="maxLosses" value="%d" type="int" />\n' % (sequence['fragment']['maxLosses'])
buff += ' <param name="filterFragments" value="%d" type="int" />\n' % (bool(sequence['fragment']['filterFragments']))
buff += ' </fragment>\n'
buff += ' <search>\n'
buff += ' <param name="maxMods" value="%d" type="int" />\n' % (sequence['search']['maxMods'])
buff += ' <param name="charge" value="%d" type="int" />\n' % (sequence['search']['charge'])
buff += ' <param name="massType" value="%d" type="int" />\n' % (sequence['search']['massType'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (_escape(sequence['search']['enzyme']))
buff += ' <param name="semiSpecific" value="%d" type="int" />\n' % (bool(sequence['search']['semiSpecific']))
buff += ' <param name="tolerance" value="%f" type="float" />\n' % (sequence['search']['tolerance'])
buff += ' <param name="units" value="%s" type="str" />\n' % (sequence['search']['units'])
buff += ' <param name="retainPos" value="%d" type="int" />\n' % (bool(sequence['search']['retainPos']))
buff += ' </search>\n'
buff += ' </sequence>\n\n'
# mass calculator
buff += ' <massCalculator>\n'
buff += ' <param name="ionseriesAgent" value="%s" type="str" />\n' % (massCalculator['ionseriesAgent'])
buff += ' <param name="ionseriesAgentCharge" value="%d" type="int" />\n' % (massCalculator['ionseriesAgentCharge'])
buff += ' <param name="ionseriesPolarity" value="%d" type="int" />\n' % (massCalculator['ionseriesPolarity'])
buff += ' <param name="patternFwhm" value="%f" type="float" />\n' % (massCalculator['patternFwhm'])
buff += ' <param name="patternThreshold" value="%f" type="float" />\n' % (massCalculator['patternThreshold'])
buff += ' <param name="patternShowPeaks" value="%d" type="int" />\n' % (bool(massCalculator['patternShowPeaks']))
buff += ' <param name="patternPeakShape" value="%s" type="unicode" />\n' % (_escape(massCalculator['patternPeakShape']))
buff += ' </massCalculator>\n\n'
# mass to formula
buff += ' <massToFormula>\n'
buff += ' <param name="countLimit" value="%d" type="int" />\n' % (massToFormula['countLimit'])
buff += ' <param name="massLimit" value="%d" type="int" />\n' % (massToFormula['massLimit'])
buff += ' <param name="charge" value="%d" type="int" />\n' % (massToFormula['charge'])
buff += ' <param name="ionization" value="%s" type="str" />\n' % (massToFormula['ionization'])
buff += ' <param name="tolerance" value="%f" type="float" />\n' % (massToFormula['tolerance'])
buff += ' <param name="units" value="%s" type="str" />\n' % (massToFormula['units'])
buff += ' <param name="formulaMin" value="%s" type="str" />\n' % (massToFormula['formulaMin'])
buff += ' <param name="formulaMax" value="%s" type="str" />\n' % (massToFormula['formulaMax'])
buff += ' <param name="autoCHNO" value="%d" type="int" />\n' % (bool(massToFormula['autoCHNO']))
buff += ' <param name="checkPattern" value="%d" type="int" />\n' % (bool(massToFormula['checkPattern']))
buff += ' <param name="rules" value="%s" type="str" />\n' % (';'.join(massToFormula['rules']))
buff += ' <param name="HCMin" value="%f" type="float" />\n' % (massToFormula['HCMin'])
buff += ' <param name="HCMax" value="%f" type="float" />\n' % (massToFormula['HCMax'])
buff += ' <param name="NCMax" value="%f" type="float" />\n' % (massToFormula['NCMax'])
buff += ' <param name="OCMax" value="%f" type="float" />\n' % (massToFormula['OCMax'])
buff += ' <param name="PCMax" value="%f" type="float" />\n' % (massToFormula['PCMax'])
buff += ' <param name="SCMax" value="%f" type="float" />\n' % (massToFormula['SCMax'])
buff += ' <param name="RDBEMin" value="%f" type="float" />\n' % (massToFormula['RDBEMin'])
buff += ' <param name="RDBEMax" value="%f" type="float" />\n' % (massToFormula['RDBEMax'])
buff += ' </massToFormula>\n\n'
# mass defect plot
buff += ' <massDefectPlot>\n'
buff += ' <param name="yAxis" value="%s" type="str" />\n' % (massDefectPlot['yAxis'])
buff += ' <param name="nominalMass" value="%s" type="str" />\n' % (massDefectPlot['nominalMass'])
buff += ' <param name="kendrickFormula" value="%s" type="str" />\n' % (massDefectPlot['kendrickFormula'])
buff += ' <param name="relIntCutoff" value="%f" type="float" />\n' % (massDefectPlot['relIntCutoff'])
buff += ' <param name="removeIsotopes" value="%d" type="int" />\n' % (bool(massDefectPlot['removeIsotopes']))
buff += ' <param name="ignoreCharge" value="%d" type="int" />\n' % (bool(massDefectPlot['ignoreCharge']))
buff += ' <param name="showNotations" value="%d" type="int" />\n' % (bool(massDefectPlot['showNotations']))
buff += ' </massDefectPlot>\n\n'
# compounds search
buff += ' <compoundsSearch>\n'
buff += ' <param name="massType" value="%d" type="int" />\n' % (compoundsSearch['massType'])
buff += ' <param name="maxCharge" value="%d" type="int" />\n' % (compoundsSearch['maxCharge'])
buff += ' <param name="radicals" value="%d" type="int" />\n' % (bool(compoundsSearch['radicals']))
buff += ' <param name="adducts" value="%s" type="str" />\n' % (';'.join(compoundsSearch['adducts']))
buff += ' </compoundsSearch>\n\n'
# peak differences
buff += ' <peakDifferences>\n'
buff += ' <param name="aminoacids" value="%d" type="int" />\n' % (bool(peakDifferences['aminoacids']))
buff += ' <param name="dipeptides" value="%d" type="int" />\n' % (bool(peakDifferences['dipeptides']))
buff += ' <param name="tolerance" value="%f" type="float" />\n' % (peakDifferences['tolerance'])
buff += ' <param name="massType" value="%d" type="int" />\n' % (peakDifferences['massType'])
buff += ' <param name="consolidate" value="%d" type="int" />\n' % (bool(peakDifferences['consolidate']))
buff += ' </peakDifferences>\n\n'
# compare peaklists
buff += ' <comparePeaklists>\n'
buff += ' <param name="tolerance" value="%f" type="float" />\n' % (comparePeaklists['tolerance'])
buff += ' <param name="units" value="%s" type="str" />\n' % (comparePeaklists['units'])
buff += ' <param name="ignoreCharge" value="%d" type="int" />\n' % (bool(comparePeaklists['ignoreCharge']))
buff += ' <param name="ratioCheck" value="%d" type="int" />\n' % (bool(comparePeaklists['ratioCheck']))
buff += ' <param name="ratioDirection" value="%d" type="int" />\n' % (comparePeaklists['ratioDirection'])
buff += ' <param name="ratioThreshold" value="%f" type="float" />\n' % (comparePeaklists['ratioThreshold'])
buff += ' </comparePeaklists>\n\n'
# spectrum generator
buff += ' <spectrumGenerator>\n'
buff += ' <param name="fwhm" value="%f" type="float" />\n' % (spectrumGenerator['fwhm'])
buff += ' <param name="points" value="%d" type="int" />\n' % (spectrumGenerator['points'])
buff += ' <param name="noise" value="%f" type="float" />\n' % (spectrumGenerator['noise'])
buff += ' <param name="forceFwhm" value="%d" type="int" />\n' % (bool(spectrumGenerator['forceFwhm']))
buff += ' <param name="peakShape" value="%s" type="unicode" />\n' % (_escape(spectrumGenerator['peakShape']))
buff += ' <param name="showPeaks" value="%d" type="int" />\n' % (bool(spectrumGenerator['showPeaks']))
buff += ' <param name="showOverlay" value="%d" type="int" />\n' % (bool(spectrumGenerator['showOverlay']))
buff += ' </spectrumGenerator>\n\n'
# envelope fit
buff += ' <envelopeFit>\n'
buff += ' <param name="fit" value="%s" type="str" />\n' % (envelopeFit['fit'])
buff += ' <param name="fwhm" value="%f" type="float" />\n' % (envelopeFit['fwhm'])
buff += ' <param name="forceFwhm" value="%d" type="int" />\n' % (bool(envelopeFit['forceFwhm']))
buff += ' <param name="peakShape" value="%s" type="unicode" />\n' % (_escape(envelopeFit['peakShape']))
buff += ' <param name="autoAlign" value="%d" type="int" />\n' % (bool(envelopeFit['autoAlign']))
buff += ' <param name="relThreshold" value="%f" type="float" />\n' % (envelopeFit['relThreshold'])
buff += ' </envelopeFit>\n\n'
# mascot
buff += ' <mascot>\n'
buff += ' <common>\n'
buff += ' <param name="server" value="%s" type="unicode" />\n' % (_escape(mascot['common']['server']))
buff += ' <param name="searchType" value="%s" type="str" />\n' % (mascot['common']['searchType'])
buff += ' <param name="userName" value="%s" type="unicode" />\n' % (_escape(mascot['common']['userName']))
buff += ' <param name="userEmail" value="%s" type="unicode" />\n' % (_escape(mascot['common']['userEmail']))
buff += ' <param name="filterAnnotations" value="%d" type="int" />\n' % (bool(mascot['common']['filterAnnotations']))
buff += ' <param name="filterMatches" value="%d" type="int" />\n' % (bool(mascot['common']['filterMatches']))
buff += ' <param name="filterUnselected" value="%d" type="int" />\n' % (bool(mascot['common']['filterUnselected']))
buff += ' <param name="filterIsotopes" value="%d" type="int" />\n' % (bool(mascot['common']['filterIsotopes']))
buff += ' <param name="filterUnknown" value="%d" type="int" />\n' % (bool(mascot['common']['filterUnknown']))
buff += ' </common>\n'
buff += ' <pmf>\n'
buff += ' <param name="database" value="%s" type="unicode" />\n' % (mascot['pmf']['database'])
buff += ' <param name="taxonomy" value="%s" type="unicode" />\n' % (mascot['pmf']['taxonomy'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (mascot['pmf']['enzyme'])
buff += ' <param name="miscleavages" value="%s" type="unicode" />\n' % (mascot['pmf']['miscleavages'])
buff += ' <param name="fixedMods" value="%s" type="unicode" />\n' % (';'.join(mascot['pmf']['fixedMods']))
buff += ' <param name="variableMods" value="%s" type="unicode" />\n' % (';'.join(mascot['pmf']['variableMods']))
buff += ' <param name="hiddenMods" value="%d" type="int" />\n' % (bool(mascot['pmf']['hiddenMods']))
buff += ' <param name="proteinMass" value="%s" type="unicode" />\n' % (mascot['pmf']['proteinMass'])
buff += ' <param name="peptideTol" value="%s" type="unicode" />\n' % (mascot['pmf']['peptideTol'])
buff += ' <param name="peptideTolUnits" value="%s" type="unicode" />\n' % (mascot['pmf']['peptideTolUnits'])
buff += ' <param name="massType" value="%s" type="unicode" />\n' % (mascot['pmf']['massType'])
buff += ' <param name="charge" value="%s" type="unicode" />\n' % (mascot['pmf']['charge'])
buff += ' <param name="decoy" value="%d" type="int" />\n' % (bool(mascot['pmf']['decoy']))
buff += ' <param name="report" value="%s" type="unicode" />\n' % (mascot['pmf']['report'])
buff += ' </pmf>\n'
buff += ' <sq>\n'
buff += ' <param name="database" value="%s" type="unicode" />\n' % (mascot['sq']['database'])
buff += ' <param name="taxonomy" value="%s" type="unicode" />\n' % (mascot['sq']['taxonomy'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (mascot['sq']['enzyme'])
buff += ' <param name="miscleavages" value="%s" type="unicode" />\n' % (mascot['sq']['miscleavages'])
buff += ' <param name="fixedMods" value="%s" type="unicode" />\n' % (';'.join(mascot['sq']['fixedMods']))
buff += ' <param name="variableMods" value="%s" type="unicode" />\n' % (';'.join(mascot['sq']['variableMods']))
buff += ' <param name="hiddenMods" value="%d" type="int" />\n' % (bool(mascot['sq']['hiddenMods']))
buff += ' <param name="peptideTol" value="%s" type="unicode" />\n' % (mascot['sq']['peptideTol'])
buff += ' <param name="peptideTolUnits" value="%s" type="unicode" />\n' % (mascot['sq']['peptideTolUnits'])
buff += ' <param name="msmsTol" value="%s" type="unicode" />\n' % (mascot['sq']['msmsTol'])
buff += ' <param name="msmsTolUnits" value="%s" type="unicode" />\n' % (mascot['sq']['msmsTolUnits'])
buff += ' <param name="massType" value="%s" type="unicode" />\n' % (mascot['sq']['massType'])
buff += ' <param name="charge" value="%s" type="unicode" />\n' % (mascot['sq']['charge'])
buff += ' <param name="instrument" value="%s" type="unicode" />\n' % (mascot['sq']['instrument'])
buff += ' <param name="quantitation" value="%s" type="unicode" />\n' % (mascot['sq']['quantitation'])
buff += ' <param name="decoy" value="%d" type="int" />\n' % (bool(mascot['sq']['decoy']))
buff += ' <param name="report" value="%s" type="unicode" />\n' % (mascot['sq']['report'])
buff += ' </sq>\n'
buff += ' <mis>\n'
buff += ' <param name="database" value="%s" type="unicode" />\n' % (mascot['mis']['database'])
buff += ' <param name="taxonomy" value="%s" type="unicode" />\n' % (mascot['mis']['taxonomy'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (mascot['mis']['enzyme'])
buff += ' <param name="miscleavages" value="%s" type="unicode" />\n' % (mascot['mis']['miscleavages'])
buff += ' <param name="fixedMods" value="%s" type="unicode" />\n' % (';'.join(mascot['mis']['fixedMods']))
buff += ' <param name="variableMods" value="%s" type="unicode" />\n' % (';'.join(mascot['mis']['variableMods']))
buff += ' <param name="hiddenMods" value="%d" type="int" />\n' % (bool(mascot['mis']['hiddenMods']))
buff += ' <param name="peptideTol" value="%s" type="unicode" />\n' % (mascot['mis']['peptideTol'])
buff += ' <param name="peptideTolUnits" value="%s" type="unicode" />\n' % (mascot['mis']['peptideTolUnits'])
buff += ' <param name="msmsTol" value="%s" type="unicode" />\n' % (mascot['mis']['msmsTol'])
buff += ' <param name="msmsTolUnits" value="%s" type="unicode" />\n' % (mascot['mis']['msmsTolUnits'])
buff += ' <param name="massType" value="%s" type="unicode" />\n' % (mascot['mis']['massType'])
buff += ' <param name="charge" value="%s" type="unicode" />\n' % (mascot['mis']['charge'])
buff += ' <param name="instrument" value="%s" type="unicode" />\n' % (mascot['mis']['instrument'])
buff += ' <param name="quantitation" value="%s" type="unicode" />\n' % (mascot['mis']['quantitation'])
buff += ' <param name="errorTolerant" value="%d" type="int" />\n' % (bool(mascot['mis']['errorTolerant']))
buff += ' <param name="decoy" value="%d" type="int" />\n' % (bool(mascot['mis']['decoy']))
buff += ' <param name="report" value="%s" type="unicode" />\n' % (mascot['mis']['report'])
buff += ' </mis>\n'
buff += ' </mascot>\n\n'
# profound
buff += ' <profound>\n'
buff += ' <param name="script" value="%s" type="unicode" />\n' % (_escape(profound['script']))
buff += ' <param name="database" value="%s" type="unicode" />\n' % (profound['database'])
buff += ' <param name="taxonomy" value="%s" type="unicode" />\n' % (profound['taxonomy'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (profound['enzyme'])
buff += ' <param name="miscleavages" value="%s" type="unicode" />\n' % (profound['miscleavages'])
buff += ' <param name="fixedMods" value="%s" type="unicode" />\n' % (';'.join(profound['fixedMods']))
buff += ' <param name="variableMods" value="%s" type="unicode" />\n' % (';'.join(profound['variableMods']))
buff += ' <param name="proteinMassLow" value="%f" type="float" />\n' % (profound['proteinMassLow'])
buff += ' <param name="proteinMassHigh" value="%f" type="float" />\n' % (profound['proteinMassHigh'])
buff += ' <param name="proteinPILow" value="%d" type="int" />\n' % (profound['proteinPILow'])
buff += ' <param name="proteinPIHigh" value="%d" type="int" />\n' % (profound['proteinPIHigh'])
buff += ' <param name="peptideTol" value="%f" type="float" />\n' % (profound['peptideTol'])
buff += ' <param name="peptideTolUnits" value="%s" type="unicode" />\n' % (profound['peptideTolUnits'])
buff += ' <param name="massType" value="%s" type="unicode" />\n' % (profound['massType'])
buff += ' <param name="charge" value="%s" type="unicode" />\n' % (profound['charge'])
buff += ' <param name="ranking" value="%s" type="unicode" />\n' % (profound['ranking'])
buff += ' <param name="expectation" value="%f" type="float" />\n' % (profound['expectation'])
buff += ' <param name="candidates" value="%d" type="int" />\n' % (profound['candidates'])
buff += ' <param name="filterAnnotations" value="%d" type="int" />\n' % (bool(profound['filterAnnotations']))
buff += ' <param name="filterMatches" value="%d" type="int" />\n' % (bool(profound['filterMatches']))
buff += ' <param name="filterUnselected" value="%d" type="int" />\n' % (bool(profound['filterUnselected']))
buff += ' <param name="filterIsotopes" value="%d" type="int" />\n' % (bool(profound['filterIsotopes']))
buff += ' <param name="filterUnknown" value="%d" type="int" />\n' % (bool(profound['filterUnknown']))
buff += ' </profound>\n\n'
# protein prospector
buff += ' <prospector>\n'
buff += ' <common>\n'
buff += ' <param name="script" value="%s" type="unicode" />\n' % (_escape(prospector['common']['script']))
buff += ' <param name="searchType" value="%s" type="str" />\n' % (prospector['common']['searchType'])
buff += ' <param name="filterAnnotations" value="%d" type="int" />\n' % (bool(prospector['common']['filterAnnotations']))
buff += ' <param name="filterMatches" value="%d" type="int" />\n' % (bool(prospector['common']['filterMatches']))
buff += ' <param name="filterUnselected" value="%d" type="int" />\n' % (bool(prospector['common']['filterUnselected']))
buff += ' <param name="filterIsotopes" value="%d" type="int" />\n' % (bool(prospector['common']['filterIsotopes']))
buff += ' <param name="filterUnknown" value="%d" type="int" />\n' % (bool(prospector['common']['filterUnknown']))
buff += ' </common>\n'
buff += ' <msfit>\n'
buff += ' <param name="database" value="%s" type="unicode" />\n' % (prospector['msfit']['database'])
buff += ' <param name="taxonomy" value="%s" type="unicode" />\n' % (prospector['msfit']['taxonomy'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (prospector['msfit']['enzyme'])
buff += ' <param name="miscleavages" value="%s" type="unicode" />\n' % (prospector['msfit']['miscleavages'])
buff += ' <param name="fixedMods" value="%s" type="unicode" />\n' % (';'.join(prospector['msfit']['fixedMods']))
buff += ' <param name="variableMods" value="%s" type="unicode" />\n' % (';'.join(prospector['msfit']['variableMods']))
buff += ' <param name="proteinMassLow" value="%s" type="unicode" />\n' % (prospector['msfit']['proteinMassLow'])
buff += ' <param name="proteinMassHigh" value="%s" type="unicode" />\n' % (prospector['msfit']['proteinMassHigh'])
buff += ' <param name="proteinPILow" value="%s" type="unicode" />\n' % (prospector['msfit']['proteinPILow'])
buff += ' <param name="proteinPIHigh" value="%s" type="unicode" />\n' % (prospector['msfit']['proteinPIHigh'])
buff += ' <param name="peptideTol" value="%s" type="unicode" />\n' % (prospector['msfit']['peptideTol'])
buff += ' <param name="peptideTolUnits" value="%s" type="unicode" />\n' % (prospector['msfit']['peptideTolUnits'])
buff += ' <param name="massType" value="%s" type="unicode" />\n' % (prospector['msfit']['massType'])
buff += ' <param name="instrument" value="%s" type="unicode" />\n' % (prospector['msfit']['instrument'])
buff += ' <param name="minMatches" value="%s" type="unicode" />\n' % (prospector['msfit']['minMatches'])
buff += ' <param name="maxMods" value="%s" type="unicode" />\n' % (prospector['msfit']['maxMods'])
buff += ' <param name="report" value="%s" type="unicode" />\n' % (prospector['msfit']['report'])
buff += ' <param name="pfactor" value="%s" type="unicode" />\n' % (prospector['msfit']['pfactor'])
buff += ' </msfit>\n'
buff += ' <mstag>\n'
buff += ' <param name="database" value="%s" type="unicode" />\n' % (prospector['mstag']['database'])
buff += ' <param name="taxonomy" value="%s" type="unicode" />\n' % (prospector['mstag']['taxonomy'])
buff += ' <param name="enzyme" value="%s" type="unicode" />\n' % (prospector['mstag']['enzyme'])
buff += ' <param name="miscleavages" value="%s" type="unicode" />\n' % (prospector['mstag']['miscleavages'])
buff += ' <param name="fixedMods" value="%s" type="unicode" />\n' % (';'.join(prospector['mstag']['fixedMods']))
buff += ' <param name="variableMods" value="%s" type="unicode" />\n' % (';'.join(prospector['mstag']['variableMods']))
buff += ' <param name="peptideTol" value="%s" type="unicode" />\n' % (prospector['mstag']['peptideTol'])
buff += ' <param name="peptideTolUnits" value="%s" type="unicode" />\n' % (prospector['mstag']['peptideTolUnits'])
buff += ' <param name="peptideCharge" value="%s" type="unicode" />\n' % (prospector['mstag']['peptideCharge'])
buff += ' <param name="msmsTol" value="%s" type="unicode" />\n' % (prospector['mstag']['msmsTol'])
buff += ' <param name="msmsTolUnits" value="%s" type="unicode" />\n' % (prospector['mstag']['msmsTolUnits'])
buff += ' <param name="massType" value="%s" type="unicode" />\n' % (prospector['mstag']['massType'])
buff += ' <param name="instrument" value="%s" type="unicode" />\n' % (prospector['mstag']['instrument'])
buff += ' <param name="maxMods" value="%s" type="unicode" />\n' % (prospector['mstag']['maxMods'])
buff += ' <param name="report" value="%s" type="unicode" />\n' % (prospector['mstag']['report'])
buff += ' </mstag>\n'
buff += ' </prospector>\n\n'
# links
buff += ' <links>\n'
for name in links:
if name not in ('mMassHomepage', 'mMassForum', 'mMassTwitter', 'mMassCite', 'mMassDonate', 'mMassDownload'):
buff += ' <link name="%s" value="%s" />\n' % (_escape(name), _escape(links[name]))
buff += ' </links>\n\n'
buff += '</mMassConfig>'
# save config file
try:
save = file(path, 'w')
save.write(buff.encode("utf-8"))
save.close()
return True
except:
return False
# ----
def _getParams(sectionTag, section):
"""Get params from nodes."""
if sectionTag:
paramTags = sectionTag.getElementsByTagName('param')
if paramTags:
for paramTag in paramTags:
name = paramTag.getAttribute('name')
value = paramTag.getAttribute('value')
valueType = paramTag.getAttribute('type')
if name in section:
if valueType in ('unicode', 'str', 'float', 'int'):
try:
section[name] = eval(valueType+'(value)')
except:
pass
# ----
def _escape(text):
"""Clear special characters such as <> etc."""
text = text.strip()
search = ('&', '"', "'", '<', '>')
    replace = ('&amp;', '&quot;', '&#39;', '&lt;', '&gt;')
for x, item in enumerate(search):
text = text.replace(item, replace[x])
return text
# ----
try: loadConfig()
except IOError: saveConfig()
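
# Illustrative usage sketch (added comment, not part of the original module).
# saveConfig() serializes every section into flat elements of the form
#     <param name="..." value="..." type="int|float|str|unicode" />
# and loadConfig() feeds them back through _getParams(), which rebuilds the
# typed value via eval(type + '(value)').  A manual round-trip therefore is:
#
#     saveConfig('/tmp/mmass_config.xml')   # hypothetical path, for illustration
#     main['mzDigits'] = 2
#     loadConfig('/tmp/mmass_config.xml')   # restores the previously saved value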
|
lukauskas/mMass-fork
|
gui/config.py
|
Python
|
gpl-3.0
| 65,326
|
[
"BLAST",
"Gaussian"
] |
aa62b06459030f38967a8615f5f39ee4a70f12de2b0b9f20796501c280219574
|
#!/usr/bin/env python
""" refresh CS
"""
from __future__ import print_function
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
res = gRefresher.forceRefresh()
if not res['OK']:
print(res['Message'])
|
fstagni/DIRAC
|
tests/Jenkins/dirac-refresh-cs.py
|
Python
|
gpl-3.0
| 283
|
[
"DIRAC"
] |
cbd3c891a036d18e263ac458b5e0cf1d778e98ce3870b956c7e460880e307982
|
import PandasPatch
from iago import *
import cp2k
from Analyser import Analyser
|
ferchault/iago
|
src/iago/__init__.py
|
Python
|
mit
| 79
|
[
"CP2K"
] |
fa4b98b5030c47d7152fcad66c23592d10479bc8a47810bc3a09632d3caa543b
|
import numpy as np
class LineModel:
"""
Interface for the model of the spectral line.
Your line models must extend this class, and implement its methods.
Duck typing is very good, but this is cleaner, faster and more maintainable
as we have a lot of methods to check for. LBYL > EAFP here.
See `SingleGaussianLineModel` below for an implementation example.
"""
def __init__(self): # PEP compliance
pass
def parameters(self):
"""
Returns a list of strings, which are the (unique!) names of the
parameters of your line model.
"""
raise NotImplementedError()
def gibbs_parameter_index(self):
"""
Returns the index (an integer) of the parameter in the list defined
above that is subject to Gibbs within MH.
If None is returned, the Gibbs logic is skipped entirely.
WARNING : We're assuming that the Gibbsed parameter is the amplitude,
for performance, in the current runner implementation.
"""
return None
def min_boundaries(self, runner):
"""
Returns a list of the (default) minimum boundaries of the parameters of
your line model.
"""
raise NotImplementedError()
def max_boundaries(self, runner):
"""
Returns a list of the (default) maximum boundaries of the parameters of
your line model.
"""
raise NotImplementedError()
def post_jump(self, runner, old_parameters, new_parameters):
"""
Your model may want to mutate the `new_parameters` right after the
Cauchy jumping. The `old_parameters` are provided for convenience, you
should not mutate them. This hook is of course very much optional.
"""
pass
def modelize(self, runner, x, parameters):
"""
Returns a list of the same size as the input list `x`, containing the
values of this line model for the provided `parameters`.
"""
raise NotImplementedError()
class SingleGaussianLineModel(LineModel):
"""
A single gaussian curve, defined by its three usual parameters.
This is the default line model that `deconv3d` uses.
"""
def parameters(self):
return ['a', 'c', 'w']
def gibbs_parameter_index(self):
return 0
def min_boundaries(self, runner):
return [0, 0, 0]
def max_boundaries(self, runner):
"""
Note: The FSF is normalized, so we need to adjust the maximum of our
amplitude accordingly.
"""
cube = runner.cube
fsf = runner.fsf
fsf_max = np.amax(fsf)
a_max = np.amax(cube.data)
if fsf_max > 0:
a_max = a_max / fsf_max
return [a_max, cube.data.shape[0]-1, cube.data.shape[0]]
def modelize(self, runner, x, parameters):
"""
This model is a simple gaussian curve.
"""
return self.gaussian(x, parameters[0], parameters[1], parameters[2])
@staticmethod
def gaussian(x, a, c, w):
"""
Returns `g(x)`, `g` being a gaussian described by the other parameters :
a: Amplitude
c: Center
w: Standard deviation, aka. RMS Width
If `x` is a `ndarray`, the return value will be a `ndarray` too.
"""
return a * np.exp(-1. * (x - c) ** 2 / (2. * w ** 2))
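
# --- Illustrative usage sketch (not part of the original module) -------------
# Evaluates the default line model on a short channel axis; the parameter
# values below are arbitrary examples, not defaults used by deconv3d itself.
if __name__ == '__main__':
    model = SingleGaussianLineModel()
    x = np.arange(10)
    # parameters follow model.parameters() order: ['a', 'c', 'w']
    y = model.modelize(runner=None, x=x, parameters=[1.0, 4.0, 1.5])
    print(y)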
|
irap-omp/deconv3d
|
lib/line_models.py
|
Python
|
mit
| 3,410
|
[
"Gaussian"
] |
99bb1347a21de011f0f35aa8b9d0ac6a9b7a51b2e33e877618bf9051fb376585
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides input and output from the CSSR file format.
"""
import re
from monty.io import zopen
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jan 24, 2012"
class Cssr:
"""
Basic object for working with Cssr file. Right now, only conversion from
a Structure to a Cssr file is supported.
"""
def __init__(self, structure):
"""
Args:
structure (Structure/IStructure): A structure to create the Cssr object.
"""
if not structure.is_ordered:
raise ValueError("Cssr file can only be constructed from ordered structure")
self.structure = structure
def __str__(self):
output = [
"{:.4f} {:.4f} {:.4f}".format(*self.structure.lattice.abc),
"{:.2f} {:.2f} {:.2f} SPGR = 1 P 1 OPT = 1".format(*self.structure.lattice.angles),
f"{len(self.structure)} 0",
f"0 {self.structure.formula}",
]
for i, site in enumerate(self.structure.sites):
output.append(f"{i + 1} {site.specie} {site.a:.4f} {site.b:.4f} {site.c:.4f}")
return "\n".join(output)
def write_file(self, filename):
"""
Write out a CSSR file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
@staticmethod
def from_string(string):
"""
Reads a string representation to a Cssr object.
Args:
string (str): A string representation of a CSSR.
Returns:
Cssr object.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(i) for i in toks]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
latt = Lattice.from_parameters(*lengths, *angles)
sp = []
coords = []
for l in lines[4:]:
m = re.match(r"\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)", l.strip())
if m:
sp.append(m.group(1))
coords.append([float(m.group(i)) for i in range(2, 5)])
return Cssr(Structure(latt, sp, coords))
@staticmethod
def from_file(filename):
"""
Reads a CSSR file to a Cssr object.
Args:
filename (str): Filename to read from.
Returns:
Cssr object.
"""
with zopen(filename, "rt") as f:
return Cssr.from_string(f.read())
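
# --- Illustrative usage sketch (not part of the original module) -------------
# Round-trips a simple ordered CsCl-type structure through the CSSR string
# representation; the lattice constant is an arbitrary example value.
if __name__ == "__main__":
    lattice = Lattice.cubic(4.2)
    structure = Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    cssr = Cssr(structure)
    print(cssr)
    assert len(Cssr.from_string(str(cssr)).structure) == 2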
|
vorwerkc/pymatgen
|
pymatgen/io/cssr.py
|
Python
|
mit
| 2,834
|
[
"pymatgen"
] |
d367b6fe6ea8591c4a37ec661383d17e976be877c0ece7135c952aed189ce729
|
###############################################################
### Python Framework for VAPT v 1.0 ###
### ###
### Designed by Niraj M. ###
### niraj007m[at]gmail[dot]com ###
### This work is licensed under the Creative Commons ###
### Attribution-ShareAlike 3.0 Unported License. ###
### To view a copy of this license, visit ###
### http://creativecommons.org/licenses/by-sa/3.0/ or send a###
### letter to Creative Commons, PO Box 1866, Mountain View, ###
### CA 94042, USA. ###
###############################################################
from Tkinter import *
import ttk
import socket
from datetime import datetime
import subprocess
import tkMessageBox
import sys
class Scanning_port:
def __init__(self, master):
master.title('Infosecplatform Presents PFv1.0')
master.resizable(False, False)
master.configure(background = "#e1d8b9")
self.style = ttk.Style()
self.style.configure('TFrame', background = "#e1d8b9")
self.style.configure('TButton', background = "#e1d8b9")
self.style.configure('TLabel', background = "#e1d8b9")
self.style.configure('TSeparator', background = "#e1d8b9")
self.style.configure('Header.TLabel', font = ('Arial', 18, 'bold'))
## Frame 1 ##
self.frame_header = ttk.Frame(master)
self.frame_header.pack()
ttk.Label(self.frame_header, text = "Python Framework v 1.0", style = 'Header.TLabel').grid(row = 0, column = 1, padx = 5, pady = 5, sticky = 'sw')
ttk.Label(self.frame_header, wraplength=295, text = "Port Scanning and").grid(row = 1, column = 1, padx = 5, sticky = 'sw')
ttk.Label(self.frame_header, wraplength=295, text = "Banner Grabbing Tool for VAPT Professionals").grid(row = 2, column = 1, padx = 5, pady = 5, sticky = 'sw')
ttk.Separator(self.frame_header,orient=HORIZONTAL).grid(row=3, columnspan=5,sticky="ew", padx =5, pady = 10)
## END ##
## Frame 2 ##
self.frame_content = ttk.Frame(master)
self.frame_content.config(height = 200, width = 400)
#self.frame_content.config(relief = GROOVE)
self.frame_content.pack()
ttk.Label(self.frame_content, text = "Enter Target IP Address: ").grid(row = 2, column = 0, padx =5, pady = 10)
self.entry_name = ttk.Entry(self.frame_content, textvariable="server")
self.entry_name.setvar(name="server", value="127.0.0.1")
self.entry_name.grid(row = 3, column = 0, padx = 5)
ttk.Button(self.frame_content, text = "Scan", command=self.dscan).grid(row = 3, column = 1, padx = 5, pady = 10, sticky = 'se')
ttk.Button(self.frame_content, text = "Clear", command = self.Clear).grid(row = 3, column = 2, padx = 5, pady = 10, sticky = 'se')
## END ##
## Frame 3 ##
self.frame_report = ttk.Frame(master)
self.frame_content.config(height = 400, width = 400)
self.frame_report.pack()
self.txt = Text(self.frame_report, width = 60, height = 15)
self.txt.grid(row=4,column=0, sticky=W, padx = 5, pady = 5)
self.txt.insert(0.0, 'Open port will appear here (default range 1-1025)-Click Scan')
def dscan(self):
self.txt.delete(0.0, END)
subprocess.call('clear', shell=True)
remoteServer = self.entry_name.get()
remoteServerIP = socket.gethostbyname(remoteServer)
t1 = datetime.now()
print('Please wait, scanning remote host (default port range 1-1025)', remoteServerIP)
try:
for port in range(1,1025):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remoteServer,port))
if result == 0:
#print "Port {}: Open".format(port)
msg0 = "\nPort {}: Open ".format(port) + "--> Banner Grabbing: " + sock.recv(1024)
self.txt.insert(0.0, msg0)
sock.close()
except KeyboardInterrupt:
print "You Pressed Ctrl + c"
sys.exit()
except socket.gaierror:
print "Couldn't connect to server"
sys.exit()
t2 = datetime.now()
total = t2 - t1
print "Scanning Completed in: ", total
tkMessageBox.showinfo(title="Report Status!",message="Scaning Process Completed ")
def Clear(self):
self.entry_name.delete(0, 'end')
self.txt.delete(0.0, 'end')
def main():
root = Tk()
scan = Scanning_port(root)
menubar = Menu(root, background = "#e1d8b9")
filemenu = Menu(menubar, tearoff=0, background = "#e1d8b9")
filemenu.add_command(label="Scan", command=scan.dscan)
filemenu.add_command(label="Clear", command=scan.Clear)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=filemenu, background = "#e1d8b9")
helpmenu = Menu(menubar, tearoff=0, background = "#e1d8b9")
helpmenu.add_command(label="Help", command=index0)
helpmenu.add_command(label="About...", command=index)
menubar.add_cascade(label="Help", menu=helpmenu)
root.config(menu=menubar, background = "#e1d8b9")
root.mainloop()
def index():
filewin = Toplevel()
labelframe = LabelFrame(filewin, text="About", background = "#e1d8b9")
labelframe.pack(fill="both", expand="yes")
left1 = Label(labelframe, background = "#e1d8b9",
text="Infosecplatform presents Python Framework v 1.0\n", font = "Verdana 10 bold").pack()
left7 = Label(labelframe, background = "#e1d8b9", text="Got Questions ?",font = "Verdana 10 bold").pack()
left8 = Label(labelframe, wraplength=325, background = "#e1d8b9",
text="Please Submit your questions, comments and requests to niraj007m@gmail.com\n https://about.me/niraj.mohite\n https://infosecplatform.wordpress.com/").pack()
left9 = Label(labelframe, wraplength=325, background = "#e1d8b9",
text="This Tool is only for learning purpose, "
"We are not responsible if you misuse it !\n", font = "Verdana 7").pack()
left10 = Label(labelframe, wraplength=300, background = "#e1d8b9",
text="This work is licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported License."
"To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.", font = "Verdana 7").pack()
filewin.mainloop()
def index0():
filewin1 = Toplevel()
labelframe = LabelFrame(filewin1, text="Help", background = "#e1d8b9")
labelframe.pack(fill="both", expand="yes")
left1 = Label(labelframe, background = "#e1d8b9",
text="Infosecplatform presents Python Framework v 1.0\n", font = "Verdana 10 bold").pack()
left2 = Label(labelframe, background = "#e1d8b9",
text="What is Python Framework v 1.0 ?", font = "Verdana 10 bold").pack()
left3 = Label(labelframe, background = "#e1d8b9",
text="PFv1.0 Provides:").pack()
left4 = Label(labelframe, background = "#e1d8b9",
text="Simply GUI - Python based Tool for").pack()
left5 = Label(labelframe, background = "#e1d8b9", text="1. Port scanning.").pack()
left6 = Label(labelframe, background = "#e1d8b9", text="2. Banner Grabbing.\n").pack()
filewin1.mainloop()
if __name__ == "__main__": main()
###############################################################
### Python Framework for VAPT v 1.0 ###
### ###
### Designed by Niraj M. ###
### niraj007m[at]gmail[dot]com ###
### This work is licensed under the Creative Commons ###
### Attribution-ShareAlike 3.0 Unported License. ###
### To view a copy of this license, visit ###
### http://creativecommons.org/licenses/by-sa/3.0/ or send a###
### letter to Creative Commons, PO Box 1866, Mountain View, ###
### CA 94042, USA. ###
###############################################################
|
niraj007m/Python-Framework-v1.0
|
PFV1.py
|
Python
|
cc0-1.0
| 7,515
|
[
"VisIt"
] |
96d5c9fe1cf859b80e9463999c054d62871f1b5444b24d23337a9b8e1d3a1316
|
from setuptools import setup, find_packages
setup(
name = 'jper-sword-in',
version = '1.0.0',
packages = find_packages(),
install_requires = [
"octopus==1.0.0",
"esprit",
"Flask"
],
url = 'http://cottagelabs.com/',
author = 'Cottage Labs',
author_email = 'us@cottagelabs.com',
description = 'SWORDv2 deposit endpoint for JPER',
classifiers = [
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
JiscPER/jper-sword-in
|
setup.py
|
Python
|
apache-2.0
| 620
|
[
"Octopus"
] |
070cce44c2a65ee7be39b22086ec47ea22e4dd3ab6eb962c3038d6f27180cbf6
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import kademlia
setup(
name="kademlia",
version=kademlia.__version__,
description="Kademlia is a distributed hash table for decentralized peer-to-peer computer networks.",
author="Brian Muller",
author_email="bamuller@gmail.com",
license="MIT",
url="http://github.com/bmuller/kademlia",
packages=find_packages(),
install_requires=["rpcudp>=3.0.0"]
)
|
faisalburhanudin/kademlia
|
setup.py
|
Python
|
mit
| 453
|
[
"Brian"
] |
f1479813f4f785357deb4e124c7c32bdcb90bc06b173da6fba05f0a94fa24a30
|
#! /usr/bin/python
#
# Copyright (C) 2003-2017 ABINIT group
#
# Written by Gabriel Antonius in python (compatible v2.7).
# This is free software, and you are welcome to redistribute it
# under certain conditions (GNU General Public License,
# see ~abinit/COPYING or http://www.gnu.org/copyleft/gpl.txt).
#
# ABINIT is a project of the Universite Catholique de Louvain,
# Corning Inc. and other collaborators, see ~abinit/doc/developers/contributors.txt.
# Please read ~abinit/doc/users/acknowledgments.html for suggested
# acknowledgments of the ABINIT effort.
#
# For more information, see http://www.abinit.org .
"""
This script can be run interactively,
but it is recommended to import it as a module:
>>> from merge_ddb_nc import merge_ddb_nc
>>> merge_ddb_nc(out_fname, fnames)
"""
from __future__ import print_function
import numpy as np
import netCDF4 as nc
__version__ = '1.0.0'
def merge_ddb_nc(out_fname, fnames):
"""
Merge a list of DDB.nc files containing different elements of the same qpoint.
Arguments
---------
out_fname: Name for the merged file (will overwrite any existing file).
fnames: List of DDB.nc files.
"""
if not fnames:
raise Exception('Empty list of files given for merge')
fname0 = fnames.pop(0)
with nc.Dataset(out_fname, 'w') as dsout:
with nc.Dataset(fname0, 'r') as dsin:
nc_copy(dsin, dsout)
q0 = dsin.variables[u'q_point_reduced_coord'][...]
for fname in fnames:
with nc.Dataset(fname, 'r') as dsin:
# Check that the qpoints are the same
q = dsin.variables[u'q_point_reduced_coord'][...]
if not all(np.isclose(q0, q)):
raise Exception('Cannot merge DDB.nc at different q-points.')
# Merge dynamical matrix
dynmat = dsin.variables[u'second_derivative_of_energy'][...]
dynmat_mask = dsin.variables[u'second_derivative_of_energy_mask'][...]
ni,nj,nk,nl = dynmat_mask.shape
for i in range(ni):
for j in range(nj):
for k in range(nk):
for l in range(nl):
if dynmat_mask[i,j,k,l]:
dsout.variables[u'second_derivative_of_energy'][i,j,k,l,:] = (
dynmat[i,j,k,l,:])
dsout.variables[u'second_derivative_of_energy_mask'][i,j,k,l] = (
dynmat_mask[i,j,k,l])
# Born effective charge tensor
BECT = dsin.variables[u'born_effective_charge_tensor'][...]
BECT_mask = dsin.variables[u'born_effective_charge_tensor_mask'][...]
ni,nj,nk = BECT_mask.shape
for i in range(ni):
for j in range(nj):
for k in range(nk):
if BECT_mask[i,j,k]:
dsout.variables[u'born_effective_charge_tensor'][i,j,k] = (
BECT[i,j,k])
dsout.variables[u'born_effective_charge_tensor_mask'][i,j,k] = (
BECT_mask[i,j,k])
def nc_copy(dsin, dsout):
"""
Copy all dimensions and variables of one nc.Dataset instance into another.
"""
#Copy dimensions
for dname, dim in dsin.dimensions.iteritems():
dsout.createDimension(dname, len(dim))
#Copy variables
for vname, varin in dsin.variables.iteritems():
outVar = dsout.createVariable(vname, varin.datatype, varin.dimensions)
outVar[...] = varin[...]
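# Illustrative standalone use of nc_copy (a sketch; 'existing.nc' and 'copy.nc' are
# hypothetical file names, not files shipped with ABINIT):
#   with nc.Dataset('existing.nc', 'r') as src, nc.Dataset('copy.nc', 'w') as dst:
#       nc_copy(src, dst)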
def interactive_merge_ddb_nc():
"""Get inputs from the user and run merge_ddb_nc."""
program_name = 'merge_ddb_nc'
description = """Merge several DDB.nc files, belonging to the same q-point."""
def get_user(s):
return raw_input(s.rstrip() + '\n').split('#')[0]
print(program_name)
print(len(program_name) * '-')
print(description + '\n')
ui = get_user('Enter a name for the output file in which to merge (will overwrite any existing file):')
out_fname = str(ui)
ui = get_user('Enter the number of files to merge:')
nfiles = int(ui)
fnames = list()
for i in range(nfiles):
ui = get_user('Enter the name of file {}:'.format(i+1))
fname = str(ui)
fnames.append(fname)
# Main execution
print('Executing...')
merge_ddb_nc(out_fname, fnames)
print('All done.')
# =========================================================================== #
# Run interactive program
# =========================================================================== #
if __name__ == '__main__':
interactive_merge_ddb_nc()
|
jmbeuken/abinit
|
scripts/post_processing/merge_ddb_nc.py
|
Python
|
gpl-3.0
| 4,935
|
[
"ABINIT"
] |
c1e90b1bb58512d4ac0b31bf255b82842c85490800188b5979a5e1f9987094ce
|
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import ChemicalForceFields, rdtrajectory
from rdkit.Chem.rdtrajectory import Snapshot, \
Trajectory, ReadAmberTrajectory, ReadGromosTrajectory
import os, sys
import unittest
from rdkit import RDConfig
def feq(v1, v2, tol=1.0e-4):
return abs(v1 - v2) < tol
class TestCase(unittest.TestCase):
def setUp(self):
pass
def testSnapshot(self):
s = Snapshot([])
e = False
try:
s.GetPoint2D(12)
except:
e = True
self.assertTrue(e)
s = Snapshot([0.0, 0.0, 0.0])
e = False
try:
s.GetPoint2D(0)
except:
e = True
self.assertTrue(e)
def testTrajectory2D(self):
dim = 2
np = 10
ns = 5
traj = Trajectory(dim, np)
self.assertEqual(traj.Dimension(), dim)
self.assertEqual(traj.NumPoints(), np)
c = []
for i in range(np * dim):
c.append(float(i))
for i in range(ns):
traj.AddSnapshot(Snapshot(c, float(i)))
self.assertEqual(len(traj), ns)
e = False
try:
traj.GetSnapshot(ns)
except:
e = True
self.assertTrue(e)
e = False
try:
traj.GetSnapshot(0).GetPoint2D(np)
except:
e = True
self.assertTrue(e)
for i in range(np):
self.assertAlmostEqual(traj.GetSnapshot(0).GetPoint2D(i).x, float(i * dim))
self.assertAlmostEqual(traj.GetSnapshot(0).GetPoint2D(i).y, float(i * dim + 1))
e = False
try:
self.assertAlmostEqual(traj.GetSnapshot(0).GetPoint3D(i).z, 0.0)
except:
e = True
self.assertFalse(e)
for i in range(ns):
self.assertAlmostEqual(traj.GetSnapshot(i).GetEnergy(), float(i))
traj.RemoveSnapshot(0)
self.assertEqual(len(traj), ns - 1)
for i in range(ns - 1):
self.assertAlmostEqual(traj.GetSnapshot(i).GetEnergy(), float(i + 1))
traj.InsertSnapshot(0, Snapshot(c, 999.0))
self.assertEqual(len(traj), ns)
copySnapshot = Snapshot(traj.GetSnapshot(0))
traj.AddSnapshot(copySnapshot)
self.assertEqual(len(traj), ns + 1)
self.assertAlmostEqual(traj.GetSnapshot(0).GetEnergy(), 999.0)
self.assertAlmostEqual(traj.GetSnapshot(1).GetEnergy(), 1.0)
self.assertAlmostEqual(traj.GetSnapshot(len(traj) - 1).GetEnergy(), 999.0)
traj2 = Trajectory(traj)
self.assertEqual(len(traj), len(traj2))
def testTrajectory3D(self):
dim = 3
np = 10
ns = 5
traj = Trajectory(dim, np)
self.assertEqual(traj.Dimension(), dim)
self.assertEqual(traj.NumPoints(), np)
c = []
for i in range(np * dim):
c.append(float(i))
for i in range(ns):
traj.AddSnapshot(Snapshot(c, float(i)))
self.assertEqual(len(traj), ns)
e = False
try:
traj.GetSnapshot(ns)
except:
e = True
self.assertTrue(e)
e = False
try:
traj.GetSnapshot(0).GetPoint2D(np)
except:
e = True
self.assertTrue(e)
for i in range(np):
self.assertAlmostEqual(traj.GetSnapshot(0).GetPoint3D(i).x, float(i * dim))
self.assertAlmostEqual(traj.GetSnapshot(0).GetPoint3D(i).y, float(i * dim + 1))
self.assertAlmostEqual(traj.GetSnapshot(0).GetPoint3D(i).z, float(i * dim + 2))
if (not i):
e = False
try:
traj.GetSnapshot(0).GetPoint2D(i)
except:
e = True
self.assertTrue(e)
for i in range(ns):
self.assertAlmostEqual(traj.GetSnapshot(i).GetEnergy(), float(i))
traj.RemoveSnapshot(0)
self.assertEqual(len(traj), ns - 1)
for i in range(ns - 1):
self.assertAlmostEqual(traj.GetSnapshot(i).GetEnergy(), float(i + 1))
traj.InsertSnapshot(0, Snapshot(c, 999.0))
self.assertEqual(len(traj), ns)
copySnapshot = Snapshot(traj.GetSnapshot(0))
traj.AddSnapshot(copySnapshot)
self.assertEqual(len(traj), ns + 1)
self.assertAlmostEqual(traj.GetSnapshot(0).GetEnergy(), 999.0)
self.assertAlmostEqual(traj.GetSnapshot(1).GetEnergy(), 1.0)
self.assertAlmostEqual(traj.GetSnapshot(len(traj) - 1).GetEnergy(), 999.0)
traj2 = Trajectory(traj)
self.assertEqual(len(traj), len(traj2))
def testReadAmber(self):
rdbase = os.environ['RDBASE']
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords_bad.trx')
traj = Trajectory(2, 0)
ok = False
try:
ReadAmberTrajectory(fName, traj)
except:
ok = True
self.assertTrue(ok)
traj = Trajectory(3, 3)
ok = False
try:
ReadAmberTrajectory(fName, traj)
except:
ok = True
self.assertTrue(ok)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords_bad2.trx')
ok = False
try:
traj = Trajectory(3, 3)
ReadAmberTrajectory(fName, traj)
except:
ok = True
self.assertTrue(ok)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords.trx')
traj = Trajectory(3, 3)
ReadAmberTrajectory(fName, traj)
self.assertEqual(len(traj), 1)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords2.trx')
traj = Trajectory(3, 3)
ReadAmberTrajectory(fName, traj)
self.assertEqual(len(traj), 2)
def testReadAmberPython(self):
# reimplemented the Amber trajectory reader in Python
# let's check we get the same data as the C++ reader
# (test for building a trajectory out of Snapshots from Python)
rdbase = os.environ['RDBASE']
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords2.trx')
traj = Trajectory(3, 3)
nCoords = traj.NumPoints() * 3
nSnapshots = 0
hnd = open(fName, 'r')
line = hnd.readline()
lineNum = 0
c = []
i = 0
while (line):
lineNum += 1
if (lineNum > 1):
tok = line.strip().split()
j = 0
while ((i < nCoords) and (j < len(tok))):
c.append(float(tok[j]))
j += 1
i += 1
if (i == nCoords):
nSnapshots += 1
traj.AddSnapshot(Snapshot(c))
c = []
i = 0
line = ' '.join(tok[j:]) + ' '
else:
line = ''
else:
line = ''
line += hnd.readline()
hnd.close()
self.assertEqual(i, 0)
self.assertEqual(nSnapshots, 2)
traj2 = Trajectory(3, 3)
ReadAmberTrajectory(fName, traj2)
self.assertEqual(len(traj), len(traj2))
self.assertEqual(traj.NumPoints(), traj2.NumPoints())
for snapshotNum in range(len(traj)):
for pointNum in range(traj.NumPoints()):
for i in range(3):
self.assertAlmostEqual(
traj.GetSnapshot(snapshotNum).GetPoint3D(pointNum)[i],
traj2.GetSnapshot(snapshotNum).GetPoint3D(pointNum)[i])
def testReadGromos(self):
rdbase = os.environ['RDBASE']
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords_bad.trc')
traj = Trajectory(2, 0)
ok = False
try:
ReadGromosTrajectory(fName, traj)
except:
ok = True
self.assertTrue(ok)
traj = Trajectory(3, 3)
ok = False
try:
ReadGromosTrajectory(fName, traj)
except:
ok = True
self.assertTrue(ok)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords_bad2.trc')
ok = False
try:
traj = Trajectory(3, 3)
ReadGromosTrajectory(fName, traj)
except:
ok = True
self.assertTrue(ok)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords.trc')
traj = Trajectory(3, 3)
ReadGromosTrajectory(fName, traj)
self.assertEqual(len(traj), 1)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords2.trc')
traj = Trajectory(3, 3)
ReadGromosTrajectory(fName, traj)
self.assertEqual(len(traj), 2)
def testAddConformersFromTrajectory(self):
molBlock = \
'\n' \
' RDKit 3D\n' \
'\n' \
' 71 74 0 0 0 0 0 0 0 0999 V2000\n' \
' 8.2543 3.1901 -0.3005 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 7.4558 1.9712 0.0938 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 7.3934 1.0441 -0.9483 O 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 6.6660 -0.0533 -0.4641 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 5.1928 0.2346 -0.4609 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 4.3713 -0.9410 -0.5770 N 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 3.1852 -1.0034 -1.2291 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.2914 0.1276 -1.6316 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.9308 -0.4468 -1.9908 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.1417 -0.7821 -0.7545 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -0.1848 0.3695 0.0456 N 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -1.5661 0.7686 -0.0745 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -2.4768 -0.0640 0.8206 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -3.8874 0.1143 0.3941 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -4.6333 -0.9984 0.0264 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -6.0127 -0.9516 -0.0400 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -6.7062 0.1599 0.3963 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.0408 0.4828 -0.1977 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -7.7914 1.1180 -1.5591 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.7622 1.4403 0.7265 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.8409 -0.7397 -0.4395 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.9121 -1.6637 0.4258 O 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -9.7414 -0.7636 -1.5059 O 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -5.9736 1.2357 0.8565 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -4.5843 1.2252 0.8530 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.6263 1.4884 -0.3942 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.0541 1.0258 -0.4230 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.9225 -2.3317 -1.2963 N 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 3.6061 -2.9745 -0.3180 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 3.3554 -4.1536 0.3735 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 3.7653 -4.2712 1.6948 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 4.8254 -3.4613 2.0796 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 5.1978 -2.3436 1.3419 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 4.5694 -2.0799 0.1305 C 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 9.3138 3.1372 0.0031 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 7.8117 4.0754 0.1798 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 8.2358 3.3535 -1.4074 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 6.4027 2.2146 0.3634 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 7.9270 1.5444 1.0040 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 7.0677 -0.2415 0.5615 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 6.9530 -0.9105 -1.1025 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 4.9578 0.7259 0.5137 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 4.9985 0.9430 -1.3033 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.7171 0.7264 -2.4494 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.3994 0.2339 -2.6810 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 1.1342 -1.4171 -2.5076 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -0.7632 -1.3370 -1.0391 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.7845 -1.4394 -0.1311 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.0125 0.1989 1.0673 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -1.6672 1.8215 0.2925 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -1.8705 0.7271 -1.1337 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -2.3045 0.3159 1.8590 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -2.1980 -1.1367 0.7635 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -4.1513 -1.9468 -0.2114 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -6.6138 -1.7460 -0.4718 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -7.0727 0.4399 -2.0858 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -7.3144 2.1076 -1.4482 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.7609 1.1720 -2.1135 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.3137 2.4504 0.5729 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -8.6170 1.0817 1.7580 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -9.8244 1.4444 0.4200 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -6.4629 2.0541 1.3719 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' -4.0445 2.0563 1.3058 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.3329 1.8224 -1.3991 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 0.4920 2.3164 0.3160 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.2025 0.3766 0.4766 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.7945 1.8369 -0.3969 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 2.4404 -4.6964 0.1303 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 3.3157 -5.0055 2.3587 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 5.4272 -3.7654 2.9380 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 5.5668 -1.5069 1.9380 H 0 0 0 0 0 0 0 0 0 0 0 0\n' \
' 1 2 1 0\n' \
' 2 3 1 0\n' \
' 3 4 1 0\n' \
' 4 5 1 0\n' \
' 5 6 1 0\n' \
' 6 7 1 0\n' \
' 7 8 1 0\n' \
' 8 9 1 0\n' \
' 9 10 1 0\n' \
' 10 11 1 0\n' \
' 11 12 1 0\n' \
' 12 13 1 0\n' \
' 13 14 1 0\n' \
' 14 15 2 0\n' \
' 15 16 1 0\n' \
' 16 17 2 0\n' \
' 17 18 1 0\n' \
' 18 19 1 0\n' \
' 18 20 1 0\n' \
' 18 21 1 0\n' \
' 21 22 2 0\n' \
' 21 23 1 0\n' \
' 17 24 1 0\n' \
' 24 25 2 0\n' \
' 11 26 1 0\n' \
' 26 27 1 0\n' \
' 7 28 2 0\n' \
' 28 29 1 0\n' \
' 29 30 2 0\n' \
' 30 31 1 0\n' \
' 31 32 2 0\n' \
' 32 33 1 0\n' \
' 33 34 2 0\n' \
' 34 6 1 0\n' \
' 27 8 1 0\n' \
' 34 29 1 0\n' \
' 25 14 1 0\n' \
' 1 35 1 0\n' \
' 1 36 1 0\n' \
' 1 37 1 0\n' \
' 2 38 1 0\n' \
' 2 39 1 0\n' \
' 4 40 1 0\n' \
' 4 41 1 0\n' \
' 5 42 1 0\n' \
' 5 43 1 0\n' \
' 8 44 1 0\n' \
' 9 45 1 0\n' \
' 9 46 1 0\n' \
' 10 47 1 0\n' \
' 10 48 1 0\n' \
' 11 49 1 0\n' \
' 12 50 1 0\n' \
' 12 51 1 0\n' \
' 13 52 1 0\n' \
' 13 53 1 0\n' \
' 15 54 1 0\n' \
' 16 55 1 0\n' \
' 19 56 1 0\n' \
' 19 57 1 0\n' \
' 19 58 1 0\n' \
' 20 59 1 0\n' \
' 20 60 1 0\n' \
' 20 61 1 0\n' \
' 24 62 1 0\n' \
' 25 63 1 0\n' \
' 26 64 1 0\n' \
' 26 65 1 0\n' \
' 27 66 1 0\n' \
' 27 67 1 0\n' \
' 30 68 1 0\n' \
' 31 69 1 0\n' \
' 32 70 1 0\n' \
' 33 71 1 0\n' \
'M CHG 2 11 1 23 -1\n' \
'M END\n'
mol = Chem.MolFromMolBlock(molBlock, removeHs=False)
everySteps = 10
maxIts = 1000
gradTol = 0.01
rdbase = os.environ['RDBASE']
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'Wrap', 'test_data',
'bilastine_trajectory.sdf')
w = Chem.SDWriter(fName)
field = ChemicalForceFields.MMFFGetMoleculeForceField(
mol, ChemicalForceFields.MMFFGetMoleculeProperties(mol))
(res, sv) = field.MinimizeTrajectory(everySteps, maxIts, gradTol)
self.assertEqual(res, 0)
traj = Trajectory(3, mol.GetNumAtoms(), sv)
mol.RemoveConformer(0)
traj.AddConformersToMol(mol)
traj.Clear()
n1 = mol.GetNumConformers()
traj.AddConformersToMol(mol)
n2 = mol.GetNumConformers()
self.assertEqual(n1, n2)
for nConf in range(mol.GetNumConformers()):
mol.SetProp('ENERGY', '{0:.4f}'.format(traj.GetSnapshot(nConf).GetEnergy()))
w.write(mol, nConf)
w.close()
def testAddConformersFromAmberTrajectory(self):
mol = Chem.MolFromSmiles('CCC')
rdbase = os.environ['RDBASE']
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords.trx')
traj = Trajectory(3, mol.GetNumAtoms())
ReadAmberTrajectory(fName, traj)
self.assertEqual(len(traj), 1)
for i in range(2):
traj.AddConformersToMol(mol)
self.assertEqual(mol.GetNumConformers(), i + 1)
self.assertEqual(mol.GetConformer(i).GetNumAtoms(), 3)
self.assertAlmostEqual(mol.GetConformer(i).GetAtomPosition(0).x, 0.1941767)
self.assertAlmostEqual(mol.GetConformer(i).GetAtomPosition(2).z, -0.4088006)
mol.RemoveAllConformers()
e = False
try:
traj.AddConformersToMol(mol, 1)
except:
e = True
self.assertTrue(e)
self.assertEqual(mol.GetNumConformers(), 0)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords2.trx')
traj = Trajectory(3, mol.GetNumAtoms())
ReadAmberTrajectory(fName, traj)
self.assertEqual(len(traj), 2)
traj.AddConformersToMol(mol)
self.assertEqual(mol.GetNumConformers(), 2)
mol.RemoveAllConformers()
traj.AddConformersToMol(mol, 0, 0)
self.assertEqual(mol.GetNumConformers(), 1)
traj.AddConformersToMol(mol, 1)
self.assertEqual(mol.GetNumConformers(), 2)
def testAddConformersFromGromosTrajectory(self):
mol = Chem.MolFromSmiles('CCC')
rdbase = os.environ['RDBASE']
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords.trc')
traj = Trajectory(3, mol.GetNumAtoms())
ReadGromosTrajectory(fName, traj)
self.assertEqual(len(traj), 1)
for i in range(2):
traj.AddConformersToMol(mol)
self.assertEqual(mol.GetNumConformers(), i + 1)
self.assertEqual(mol.GetConformer(i).GetNumAtoms(), 3)
self.assertAlmostEqual(mol.GetConformer(i).GetAtomPosition(0).x, 1.941767)
self.assertAlmostEqual(mol.GetConformer(i).GetAtomPosition(2).z, -4.088006)
mol.RemoveAllConformers()
e = False
try:
traj.AddConformersToMol(mol, 1)
except:
e = True
self.assertTrue(e)
self.assertEqual(mol.GetNumConformers(), 0)
fName = os.path.join(rdbase, 'Code', 'GraphMol', 'test_data', 'water_coords2.trc')
traj = Trajectory(3, mol.GetNumAtoms())
ReadGromosTrajectory(fName, traj)
self.assertEqual(len(traj), 2)
traj.AddConformersToMol(mol)
self.assertEqual(mol.GetNumConformers(), 2)
mol.RemoveAllConformers()
traj.AddConformersToMol(mol, 0, 0)
self.assertEqual(mol.GetNumConformers(), 1)
traj.AddConformersToMol(mol, 1)
self.assertEqual(mol.GetNumConformers(), 2)
if __name__ == '__main__':
print("Testing Trajectory wrapper")
unittest.main()
|
jandom/rdkit
|
Code/GraphMol/Wrap/testTrajectory.py
|
Python
|
bsd-3-clause
| 19,353
|
[
"Amber",
"RDKit"
] |
1d2bf17e8b609a0d67acc74d17a71b4eb1b04c4807c9748305376381a32f5ecb
|
#!/usr/bin/env python
# Reports a beta diversity matrix for tabular input file
# using scikit-bio
# Daniel Blankenberg
import sys
import optparse
import codecs
from skbio.diversity import beta_diversity
from skbio import TreeNode
__VERSION__ = "0.0.1"
DELIMITER = '\t'
NEEDS_TREE = [ 'unweighted_unifrac', 'weighted_unifrac' ]
NEEDS_OTU_NAMES = [ 'unweighted_unifrac', 'weighted_unifrac' ]
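# Illustrative command line (a sketch; the file names and column choices are assumptions):
#   python scikit_bio_diversity_beta_diversity.py -i abundance.tsv --header --otu_column 1 \
#       --sample_columns 2,3,4 --distance_metric braycurtis -o beta_dm.txt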
def __main__():
parser = optparse.OptionParser( usage="%prog [options]" )
parser.add_option( '-v', '--version', dest='version', action='store_true', default=False, help='print version and exit' )
parser.add_option( '-i', '--input', dest='input', action='store', type="string", default=None, help='Input abundance Filename' )
parser.add_option( '', '--otu_column', dest='otu_column', action='store', type="int", default=None, help='OTU ID Column (1 based)' )
parser.add_option( '', '--sample_columns', dest='sample_columns', action='store', type="string", default=None, help='Comma separated list of sample columns, unset to use all.' )
parser.add_option( '', '--header', dest='header', action='store_true', default=False, help='Abundance file has a header line' )
parser.add_option( '', '--distance_metric', dest='distance_metric', action='store', type="string", default=None, help='Distance metric to use' )
parser.add_option( '', '--tree', dest='tree', action='store', type="string", default=None, help='Newick Tree Filename' )
parser.add_option( '-o', '--output', dest='output', action='store', type="string", default=None, help='Output Filename' )
(options, args) = parser.parse_args()
if options.version:
print >> sys.stderr, "scikit-bio betadiversity from tabular file", __VERSION__
sys.exit()
if options.otu_column is not None:
otu_column = options.otu_column - 1
else:
otu_column = None
if options.sample_columns is None:
with open( options.input, 'rb' ) as fh:
line = fh.readline()
columns = range( len( line.split( DELIMITER ) ) )
if otu_column in columns:
columns.remove( otu_column )
else:
columns = map( lambda x: int( x ) - 1, options.sample_columns.split( "," ) )
max_col = max( columns + [otu_column] )
counts = [ [] for x in columns ]
sample_names = []
otu_names = []
with open( options.input, 'rb' ) as fh:
if options.header:
header = fh.readline().rstrip('\n\r').split( DELIMITER )
sample_names = [ header[i] for i in columns ]
else:
sample_names = [ "SAMPLE_%i" % x for x in range( len( columns ) ) ]
for i, line in enumerate( fh ):
fields = line.rstrip('\n\r').split( DELIMITER )
if len(fields) <= max_col:
print >> sys.stderr, "Bad data line: ", fields
continue
if otu_column is not None:
otu_names.append( fields[ otu_column ] )
else:
otu_names.append( "OTU_%i" % i )
for j, col in enumerate( columns ):
counts[ j ].append( int( fields[ col ] ) )
extra_kwds = {}
if options.distance_metric in NEEDS_OTU_NAMES:
extra_kwds['otu_ids'] = otu_names
if options.distance_metric in NEEDS_TREE:
assert options.tree, "You must provide a newick tree when using '%s'" % options.distance_metric
# NB: TreeNode apparently needs unicode files
with codecs.open( options.tree, 'rb', 'utf-8' ) as fh:
extra_kwds['tree'] = TreeNode.read( fh )
bd_dm = beta_diversity( options.distance_metric, counts, ids=sample_names, **extra_kwds )
bd_dm.write( options.output )
if __name__ == "__main__":
__main__()
|
nturaga/tools-iuc
|
tools/scikit-bio/scikit_bio_diversity_beta_diversity.py
|
Python
|
mit
| 3,773
|
[
"scikit-bio"
] |
7969538c177fb6b34fd71a1559eec622a9cad33aab57ddf4ec4db017c82bbfc4
|
import os, sys, re, inspect, types, errno, pprint, subprocess, io, shutil, time, copy, unittest
import path_tool
path_tool.activate_module('FactorySystem')
path_tool.activate_module('argparse')
from ParseGetPot import ParseGetPot
from socket import gethostname
#from options import *
from util import *
from RunParallel import RunParallel
from CSVDiffer import CSVDiffer
from XMLDiffer import XMLDiffer
from Tester import Tester
from PetscJacobianTester import PetscJacobianTester
from InputParameters import InputParameters
from Factory import Factory
from Parser import Parser
from Warehouse import Warehouse
import argparse
from optparse import OptionParser, OptionGroup, Values
from timeit import default_timer as clock
class TestHarness:
@staticmethod
def buildAndRun(argv, app_name, moose_dir):
if '--store-timing' in argv:
harness = TestTimer(argv, app_name, moose_dir)
else:
harness = TestHarness(argv, app_name, moose_dir)
harness.findAndRunTests()
sys.exit(harness.error_code)
def __init__(self, argv, app_name, moose_dir):
self.factory = Factory()
# Build a Warehouse to hold the MooseObjects
self.warehouse = Warehouse()
# Get dependent applications and load dynamic tester plugins
# If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
sys.path.append(os.path.join(moose_dir, 'framework', 'scripts')) # For find_dep_apps.py
# Use the find_dep_apps script to get the dependent applications for an app
import find_dep_apps
depend_app_dirs = find_dep_apps.findDepApps(app_name)
dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])
# Finally load the plugins!
self.factory.loadPlugins(dirs, 'testers', Tester)
self.test_table = []
self.num_passed = 0
self.num_failed = 0
self.num_skipped = 0
self.num_pending = 0
self.host_name = gethostname()
self.moose_dir = moose_dir
self.base_dir = os.getcwd()
self.run_tests_dir = os.path.abspath('.')
self.code = '2d2d6769726c2d6d6f6465'
self.error_code = 0x0
# Assume libmesh is a peer directory to MOOSE if not defined
if os.environ.has_key("LIBMESH_DIR"):
self.libmesh_dir = os.environ['LIBMESH_DIR']
else:
self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
self.file = None
# Failed Tests file object
self.writeFailedTest = None
# Parse arguments
self.parseCLArgs(argv)
self.checks = {}
self.checks['platform'] = getPlatforms()
self.checks['submodules'] = getInitializedSubmodules(self.run_tests_dir)
# The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
# to select whether they want to probe for libMesh configuration options.
if self.options.skip_config_checks:
self.checks['compiler'] = set(['ALL'])
self.checks['petsc_version'] = 'N/A'
self.checks['library_mode'] = set(['ALL'])
self.checks['mesh_mode'] = set(['ALL'])
self.checks['dtk'] = set(['ALL'])
self.checks['unique_ids'] = set(['ALL'])
self.checks['vtk'] = set(['ALL'])
self.checks['tecplot'] = set(['ALL'])
self.checks['dof_id_bytes'] = set(['ALL'])
self.checks['petsc_debug'] = set(['ALL'])
self.checks['curl'] = set(['ALL'])
self.checks['tbb'] = set(['ALL'])
self.checks['superlu'] = set(['ALL'])
self.checks['slepc'] = set(['ALL'])
self.checks['unique_id'] = set(['ALL'])
self.checks['cxx11'] = set(['ALL'])
self.checks['asio'] = set(['ALL'])
else:
self.checks['compiler'] = getCompilers(self.libmesh_dir)
self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
self.checks['tecplot'] = getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
self.checks['curl'] = getLibMeshConfigOption(self.libmesh_dir, 'curl')
self.checks['tbb'] = getLibMeshConfigOption(self.libmesh_dir, 'tbb')
self.checks['superlu'] = getLibMeshConfigOption(self.libmesh_dir, 'superlu')
self.checks['slepc'] = getLibMeshConfigOption(self.libmesh_dir, 'slepc')
self.checks['unique_id'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
self.checks['cxx11'] = getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
self.checks['asio'] = getIfAsioExists(self.moose_dir)
# Override the MESH_MODE option if using the '--distributed-mesh'
# or (deprecated) '--parallel-mesh' option.
if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
(self.options.cli_args != None and \
(self.options.cli_args.find('--parallel-mesh') != -1 or self.options.cli_args.find('--distributed-mesh') != -1)):
option_set = set(['ALL', 'PARALLEL'])
self.checks['mesh_mode'] = option_set
method = set(['ALL', self.options.method.upper()])
self.checks['method'] = method
self.initialize(argv, app_name)
"""
Recursively walks the current tree looking for tests to run
Error codes:
0x0 - Success
0x7F - Parser error (any flag in this range)
0x80 - TestHarness error
"""
def findAndRunTests(self, find_only=False):
self.error_code = 0x0
self.preRun()
self.start_time = clock()
try:
# PBS STUFF
if self.options.pbs:
# Check to see if we are using the PBS Emulator.
# It's expensive, so it must remain outside of the os.walk for loop.
self.options.PBSEmulator = self.checkPBSEmulator()
if self.options.pbs and os.path.exists(self.options.pbs):
self.options.processingPBS = True
self.processPBSResults()
else:
self.options.processingPBS = False
self.base_dir = os.getcwd()
for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
# Prune submodule paths when searching for tests
if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
dirnames[:] = []
# walk into directories that aren't contrib directories
if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
for file in filenames:
# set cluster_handle to be None initially (happens for each test)
self.options.cluster_handle = None
# See if there were other arguments (test names) passed on the command line
if file == self.options.input_file_name: #and self.test_match.search(file):
saved_cwd = os.getcwd()
sys.path.append(os.path.abspath(dirpath))
os.chdir(dirpath)
if self.prunePath(file):
continue
# Build a Parser to parse the objects
parser = Parser(self.factory, self.warehouse)
# Parse it
self.error_code = self.error_code | parser.parse(file)
# Retrieve the tests from the warehouse
testers = self.warehouse.getActiveObjects()
# Augment the Testers with additional information directly from the TestHarness
for tester in testers:
self.augmentParameters(file, tester)
# Short circuit this loop if we've only been asked to parse Testers
# Note: The warehouse will accumulate all testers in this mode
if find_only:
self.warehouse.markAllObjectsInactive()
continue
# Clear out the testers, we won't need them to stick around in the warehouse
self.warehouse.clear()
if self.options.enable_recover:
testers = self.appendRecoverableTests(testers)
# Handle PBS tests.cluster file
if self.options.pbs:
(tester, command) = self.createClusterLauncher(dirpath, testers)
if command is not None:
tester.setStatus('LAUNCHED', tester.bucket_pbs)
self.runner.run(tester, command)
else:
# Go through the Testers and run them
for tester in testers:
# Double the allotted time for tests when running with the valgrind option
tester.setValgrindMode(self.options.valgrind_mode)
# When running in valgrind mode, we end up with a ton of output for each failed
# test. Therefore, we limit the number of fails...
if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
tester.setStatus('Max Fails Exceeded', tester.bucket_fail)
elif self.num_failed > self.options.max_fails:
tester.setStatus('Max Fails Exceeded', tester.bucket_fail)
elif tester.parameters().isValid('error_code'):
tester.setStatus('Parser Error', tester.bucket_skip)
else:
should_run = tester.checkRunnableBase(self.options, self.checks, self.test_list)
# check for deprecated tuple
if type(should_run) == type(()):
(should_run, reason) = should_run
if not should_run:
reason = 'deprecated checkRunnableBase #8037'
tester.setStatus(reason, tester.bucket_skip)
if should_run:
command = tester.getCommand(self.options)
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
if self.options.dry_run:
self.handleTestStatus(tester)
else:
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
status = tester.getStatus()
if status != tester.bucket_silent: # SILENT occurs when a user is using --re options
if (self.options.report_skipped and status == tester.bucket_skip) \
or status == tester.bucket_skip:
self.handleTestStatus(tester)
elif status == tester.bucket_deleted and self.options.extra_info:
self.handleTestStatus(tester)
self.runner.jobSkipped(tester.parameters()['test_name'])
# See if any tests have colliding outputs
self.checkForRaceConditionOutputs(testers, dirpath)
os.chdir(saved_cwd)
sys.path.pop()
except KeyboardInterrupt:
if self.writeFailedTest != None:
self.writeFailedTest.close()
print '\nExiting due to keyboard interrupt...'
sys.exit(0)
self.runner.join()
# Wait for all tests to finish
if self.options.pbs and self.options.processingPBS == False:
print '\n< checking batch status >\n'
self.options.processingPBS = True
self.processPBSResults()
self.cleanup()
# Flags for the parser start at the low bit, flags for the TestHarness start at the high bit
if self.num_failed:
self.error_code = self.error_code | 0x80
return
def createClusterLauncher(self, dirpath, testers):
self.options.test_serial_number = 0
command = None
tester = None
# Create the tests.cluster input file
# Loop through each tester and create a job
for tester in testers:
should_run = tester.checkRunnableBase(self.options, self.checks)
if should_run:
if self.options.cluster_handle == None:
self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
self.options.cluster_handle.write('[Jobs]\n')
# This returns the command to run as well as builds the parameters of the test
# The resulting command once this loop has completed is sufficient to launch
# all previous jobs
command = tester.getCommand(self.options)
self.options.cluster_handle.write('[]\n')
self.options.test_serial_number += 1
else: # This job is skipped - notify the runner
status = tester.getStatus()
if status != tester.bucket_silent: # SILENT occurs when a user is using --re options
if (self.options.report_skipped and status == tester.bucket_skip) or status == tester.bucket_skip:
self.handleTestStatus(tester)
elif status == tester.bucket_deleted and self.options.extra_info:
self.handleTestStatus(tester)
self.runner.jobSkipped(tester.parameters()['test_name'])
# Close the tests.cluster file
if self.options.cluster_handle is not None:
self.options.cluster_handle.close()
self.options.cluster_handle = None
# Return the final tester/command (sufficient to run all tests)
return (tester, command)
def prunePath(self, filename):
test_dir = os.path.abspath(os.path.dirname(filename))
# Filter tests that we want to run
# Under the new format, we will filter based on directory not filename since it is fixed
prune = True
if len(self.tests) == 0:
prune = False # No filter
else:
for item in self.tests:
if test_dir.find(item) > -1:
prune = False
# Return the inverse of will_run to indicate that this path should be pruned
return prune
def augmentParameters(self, filename, tester):
params = tester.parameters()
# We are going to do some formatting of the path that is printed
# Case 1. If the test directory (normally matches the input_file_name) comes first,
# we will simply remove it from the path
# Case 2. If the test directory is somewhere in the middle then we should preserve
# the leading part of the path
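# Illustrative example (hypothetical paths): with run_tests_dir '/home/moose/test' and
# input_file_name 'tests', a spec file in '/home/moose/test/tests/kernels/simple' holding a
# tester named 'diffusion' ends up with test_name 'kernels/simple.diffusion'.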
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
first_directory = relative_path.split(os.path.sep)[1] # Get first directory
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
formatted_name = relative_path + '.' + tester.name()
params['test_name'] = formatted_name
params['test_dir'] = test_dir
params['relative_path'] = relative_path
params['executable'] = self.executable
params['hostname'] = self.host_name
params['moose_dir'] = self.moose_dir
params['base_dir'] = self.base_dir
params['first_directory'] = first_directory
if params.isValid('prereq'):
if type(params['prereq']) != list:
print "Option 'prereq' needs to be of type list in " + params['test_name']
sys.exit(1)
params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
# This method splits a list of tests into two pieces each, the first piece will run the test for
# approx. half the number of timesteps and will write out a restart file. The second test will
# then complete the run using the MOOSE recover option.
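# For example (illustrative), a recoverable test 'foo' is cloned into 'foo_part1', which runs with
# '--half-transient Outputs/checkpoint=true', while the original then runs with '--recover' and
# gains 'foo_part1' as a prereq.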
def appendRecoverableTests(self, testers):
new_tests = []
for part1 in testers:
if part1.parameters()['recover'] == True:
# Clone the test specs
part2 = copy.deepcopy(part1)
# Part 1:
part1_params = part1.parameters()
part1_params['test_name'] += '_part1'
part1_params['cli_args'].append('--half-transient Outputs/checkpoint=true')
part1_params['skip_checks'] = True
# Part 2:
part2_params = part2.parameters()
part2_params['prereq'].append(part1.parameters()['test_name'])
part2_params['delete_output_before_running'] = False
part2_params['cli_args'].append('--recover')
part2_params.addParam('caveats', ['recover'], "")
new_tests.append(part2)
testers.extend(new_tests)
return testers
## Finish the test by inspecting the raw output
def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
caveats = []
test = tester.specs # Need to refactor
if test.isValid('caveats'):
caveats = test['caveats']
# Check for test failure using the status bucket
did_pass = tester.didPass()
status = tester.getStatus()
result = ''
# PASS and DRY_RUN fall into this category
if did_pass:
if self.options.extra_info:
checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
for check in checks:
if not 'ALL' in test[check]:
caveats.append(', '.join(test[check]))
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] ' + tester.getSuccessMessage()
else:
result = tester.getSuccessMessage()
# FAIL, DIFF and DELETED fall into this category
elif status == tester.bucket_fail or status == tester.bucket_diff or status == tester.bucket_deleted:
result = 'FAILED (%s)' % tester.getStatusMessage()
# PBS and any other possibly unknown statuses fall into this category.
# Note: SKIP and RUNNING messages are handled in handleTestResult because the
# TestHarness does not call 'testOutputAndFinish' for SKIP/RUNNING statuses.
else:
result = tester.getStatusMessage()
self.handleTestResult(tester, output, result, start, end)
def getTiming(self, output):
time = ''
m = re.search(r"Active time=(\S+)", output)
if m != None:
return m.group(1)
def getSolveTime(self, output):
time = ''
m = re.search(r"solve().*", output)
if m != None:
return m.group().split()[5]
def checkExpectError(self, output, expect_error):
if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
#print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
return False
else:
return True
# PBS Defs
def checkPBSEmulator(self):
try:
qstat_process = subprocess.Popen(['qstat', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_output = qstat_process.communicate()
except OSError:
# qstat binary is not available
print 'qstat not available. Perhaps you need to load the PBS module?'
sys.exit(1)
if len(qstat_output[1]):
# The PBS Emulator has no --version argument, and thus returns output to stderr
return True
else:
return False
def processPBSResults(self):
# If batch file exists, check the contents for pending tests.
if os.path.exists(self.options.pbs):
# Build a list of launched jobs
batch_file = open(self.options.pbs)
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
# Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
for job in batch_list:
file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
# Populate the input_file_name argument so augmentParameters can format the test_name
self.options.input_file_name = job[-1]
# Build a Warehouse to hold the MooseObjects
warehouse = Warehouse()
# Build a Parser to parse the objects
parser = Parser(self.factory, warehouse)
# Parse it
parser.parse(file)
# Retrieve the tests from the warehouse
testers = warehouse.getAllObjects()
for tester in testers:
self.augmentParameters(file, tester)
for tester in testers:
reason = ''
# Build the requested Tester object
if job[1] == tester.parameters()['test_name']:
# Create Test Type
# test = self.factory.create(tester.parameters()['type'], tester)
# Get job status via qstat
qstat = ['qstat', '-f', '-x', str(job[0])]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
if qstat_stdout != None:
output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
else:
return ('QSTAT NOT FOUND', '')
# Report the current status of JOB_ID
if output_value == 'F':
# F = Finished. Get the exit code reported by qstat
exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))
# Read the stdout file
if os.path.exists(job[2]):
output_file = open(job[2], 'r')
# Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
outfile = output_file.read()
output_file.close()
output = tester.processResults(tester.specs['moose_dir'], exit_code, self.options, outfile)
self.testOutputAndFinish(tester, exit_code, outfile)
continue
else:
# I ran into this scenario when the cluster went down, but launched/completed my job :)
reason = 'FAILED (NO STDOUT FILE)'
tester.setStatus(reason, tester.bucket_fail)
elif output_value == 'R':
# Job is currently running
reason = 'RUNNING'
elif output_value == 'E':
# Job is exiting
reason = 'EXITING'
elif output_value == 'Q':
# Job is currently queued
reason = 'QUEUED'
if reason != '' and tester.getStatus() != tester.bucket_fail:
tester.setStatus(reason, tester.bucket_pbs)
self.handleTestStatus(tester)
else:
return ('BATCH FILE NOT FOUND', '')
def buildPBSBatch(self, output, tester):
# Create/Update the batch file
if 'command not found' in output:
tester.setStatus('QSUB NOT FOUND', tester.bucket_fail)
else:
# Get the Job information from the ClusterLauncher
results = re.findall(r'JOB_NAME: (\w+) JOB_ID:.* (\d+).*TEST_NAME: (\S+)', output)
if len(results) != 0:
file_name = self.options.pbs
job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
for result in results:
(test_dir, job_id, test_name) = result
qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
# Get the Output_Path from qstat stdout
if qstat_stdout != None:
output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '').strip()
else:
job_list.close()
tester.setStatus('QSTAT NOT FOUND', tester.bucket_fail)
# Write job_id, test['test_name'], and Output_Path to the batch file
job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name + '\n')
# Return to TestHarness and inform we have launched the job
job_list.close()
tester.setStatus('LAUNCHED', tester.bucket_pbs)
else:
tester.setStatus('QSTAT INVALID RESULTS', tester.bucket_fail)
return
def cleanPBSBatch(self):
# Open the PBS batch file and assign it to a list
if os.path.exists(self.options.pbs_cleanup):
batch_file = open(self.options.pbs_cleanup, 'r')
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
else:
print 'PBS batch file not found:', self.options.pbs_cleanup
sys.exit(1)
# Loop through launched jobs and delete whats found.
for job in batch_list:
if os.path.exists(job[2]):
batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
if os.path.exists('/'.join(batch_dir)):
shutil.rmtree('/'.join(batch_dir))
if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
os.remove(self.options.pbs_cleanup)
# END PBS Defs
## Method to print output generated by the TestHarness while attempting to run a tester
def handleTestStatus(self, tester, output=None):
status = tester.getStatus()
test_completed = False
# Statuses that inform the TestHarness, that this test is still running.
if status == tester.bucket_pending:
print printResult(tester, tester.getStatusMessage(), 0, 0, 0, self.options)
# Statuses generated when using PBS options
elif status == tester.bucket_pbs:
# TestHarness wants to check on a PBS job that was launched (qstat)
if self.options.pbs and self.options.processingPBS == False:
self.buildPBSBatch(output, tester)
# This can potentially cause a failure (qstat issues), so handle that case separately
if status == tester.bucket_fail:
self.handleTestResult(tester, '', tester.getStatusMessage(), 0, 0, True)
test_completed = True
else:
print printResult(tester, tester.getStatusMessage(), 0, 0, 0, self.options)
# Job was launched during a previous run, so instead of printing to the screen
# add the statuses obtained on _this_ run to the 'Final Test Result' table.
elif self.options.pbs and self.options.processingPBS == True:
self.handleTestResult(tester, '', tester.getStatusMessage(), 0, 0, True)
# All other statuses will be testers that exited prematurely (according to the TestHarness)
# So populate the result now based on status, and send the test to the result method to be
# printed to the screen
else:
result = tester.getStatusMessage()
self.handleTestResult(tester, '', result, 0, 0, True)
test_completed = True
return test_completed
## Update global variables and print output based on the test result
def handleTestResult(self, tester, output, result, start=0, end=0, add_to_table=True):
caveats = []
timing = ''
status = tester.getStatus()
did_pass = tester.didPass()
if tester.specs.isValid('caveats'):
caveats = tester.specs['caveats']
if self.options.timing:
timing = self.getTiming(output)
elif self.options.store_time:
timing = self.getSolveTime(output)
# format the SKIP messages received
if status == tester.bucket_skip:
# Include caveats in skipped messages? Useful to know when a scaled long "RUNNING..." test completes
# but Exodiff is instructed to 'SKIP' on scaled tests.
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] skipped (' + tester.getStatusMessage() + ')'
else:
result = 'skipped (' + tester.getStatusMessage() + ')'
# result is normally populated by a tester object when a test has failed. But in this case
# checkRunnableBase determined the test a failure before it even ran. So we need to set the
# results here, so they are printed if the extra_info argument was supplied
elif status == tester.bucket_deleted:
result = tester.getStatusMessage()
# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
self.test_table.append( (tester, output, result, timing, start, end) )
if status == tester.bucket_skip:
self.num_skipped += 1
elif status == tester.bucket_success:
self.num_passed += 1
elif status == tester.bucket_pending or status == tester.bucket_pbs:
self.num_pending += 1
else:
# Dump everything else into the failure status (necessary due to PBS launch failures
# not being stored in the tester status bucket)
self.num_failed += 1
self.postRun(tester.specs, timing)
print printResult(tester, result, timing, start, end, self.options)
if self.options.verbose or (not did_pass and not self.options.quiet):
output = output.replace('\r', '\n') # replace the carriage returns with newlines
lines = output.split('\n')
# Obtain color based on test status
color = tester.getColor()
if output != '': # PBS Failures can result in empty output, so lets not print that stuff twice
test_name = colorText(tester.specs['test_name'] + ": ", color, colored=self.options.colored, code=self.options.code)
output = test_name + ("\n" + test_name).join(lines)
print output
# Print result line again at the bottom of the output for failed tests
print printResult(tester, result, timing, start, end, self.options), "(reprint)"
if status != tester.bucket_skip:
if not did_pass and not self.options.failed_tests:
self.writeFailedTest.write(tester.specs['test_name'] + '\n')
if self.options.file:
self.file.write(printResult( tester, result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
if self.options.sep_files or (self.options.fail_files and not did_pass) or (self.options.ok_files and did_pass):
fname = os.path.join(tester.specs['test_dir'], tester.specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
f = open(fname, 'w')
f.write(printResult( tester, result, timing, start, end, self.options, color=False) + '\n')
f.write(output)
f.close()
# Print final results, close open files, and exit with the correct error code
def cleanup(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if (self.options.verbose or (self.num_failed != 0 and not self.options.quiet)) and not self.options.dry_run:
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
print printResult(test, result, timing, start, end, self.options)
time = clock() - self.start_time
print '-' * (TERM_COLS-1)
# Mask off TestHarness error codes to report parser errors
fatal_error = ''
if self.error_code & Parser.getErrorCodeMask():
fatal_error += ', <r>FATAL PARSER ERROR</r>'
if self.error_code & ~Parser.getErrorCodeMask():
fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'
# Print a different footer when performing a dry run
if self.options.dry_run:
print 'Processed %d tests in %.1f seconds' % (self.num_passed+self.num_skipped, time)
summary = '<b>%d would run</b>'
summary += ', <b>%d would be skipped</b>'
summary += fatal_error
print colorText( summary % (self.num_passed, self.num_skipped), "", html = True, \
colored=self.options.colored, code=self.options.code )
else:
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)
if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
summary += fatal_error
print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
# Close the failed_tests file
if self.writeFailedTest != None:
self.writeFailedTest.close()
def initialize(self, argv, app_name):
# Initialize the parallel runner with how many tests to run in parallel
self.runner = RunParallel(self, self.options.jobs, self.options.load)
## Save executable-under-test name to self.executable
self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method
# Save the output dir since the current working directory changes during tests
self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)
# Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
if self.options.output_dir:
try:
os.makedirs(self.output_dir)
except OSError, ex:
if ex.errno == errno.EEXIST: pass
else: raise
# Open the file to redirect output to and set the quiet option for file output
if self.options.file:
self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
if self.options.file or self.options.fail_files or self.options.sep_files:
self.options.quiet = True
## Parse command line options and assign them to self.options
def parseCLArgs(self, argv):
parser = argparse.ArgumentParser(description='A tool used to test MOOSE based applications')
parser.add_argument('test_name', nargs=argparse.REMAINDER)
parser.add_argument('--opt', action='store_const', dest='method', const='opt', help='test the app_name-opt binary')
parser.add_argument('--dbg', action='store_const', dest='method', const='dbg', help='test the app_name-dbg binary')
parser.add_argument('--devel', action='store_const', dest='method', const='devel', help='test the app_name-devel binary')
parser.add_argument('--oprof', action='store_const', dest='method', const='oprof', help='test the app_name-oprof binary')
parser.add_argument('--pro', action='store_const', dest='method', const='pro', help='test the app_name-pro binary')
parser.add_argument('-j', '--jobs', nargs='?', metavar='int', action='store', type=int, dest='jobs', const=1, help='run test binaries in parallel')
parser.add_argument('-e', action='store_true', dest='extra_info', help='Display "extra" information including all caveats and deleted tests')
parser.add_argument('-c', '--no-color', action='store_false', dest='colored', help='Do not show colored output')
parser.add_argument('--color-first-directory', action='store_true', dest='color_first_directory', help='Color first directory')
parser.add_argument('--heavy', action='store_true', dest='heavy_tests', help='Run tests marked with HEAVY : True')
parser.add_argument('--all-tests', action='store_true', dest='all_tests', help='Run normal tests and tests marked with HEAVY : True')
parser.add_argument('-g', '--group', action='store', type=str, dest='group', default='ALL', help='Run only tests in the named group')
parser.add_argument('--not_group', action='store', type=str, dest='not_group', help='Run only tests NOT in the named group')
    parser.add_argument('--dbfile', nargs='?', action='store', dest='dbFile', help='Location of the timings database file. If not set, assumes $HOME/timingDB/timing.sqlite')
parser.add_argument('-l', '--load-average', action='store', type=float, dest='load', default=64.0, help='Do not run additional tests if the load average is at least LOAD')
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='tests', help='The default test specification file to look for (default="tests").')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--skip-config-checks', action='store_true', dest='skip_config_checks', help='Skip configuration checks (all tests will run regardless of restrictions)')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
parser.add_argument('--n-threads', nargs=1, action='store', type=int, dest='nthreads', default=1, help='Number of threads to use when running mpiexec')
parser.add_argument('-d', action='store_true', dest='debug_harness', help='Turn on Test Harness debugging')
parser.add_argument('--recover', action='store_true', dest='enable_recover', help='Run a test in recover mode')
parser.add_argument('--valgrind', action='store_const', dest='valgrind_mode', const='NORMAL', help='Run normal valgrind tests')
parser.add_argument('--valgrind-heavy', action='store_const', dest='valgrind_mode', const='HEAVY', help='Run heavy valgrind tests')
    parser.add_argument('--valgrind-max-fails', nargs=1, type=int, dest='valgrind_max_fails', default=5, help='The number of valgrind tests allowed to fail before the remaining valgrind tests are skipped')
    parser.add_argument('--max-fails', nargs=1, type=int, dest='max_fails', default=50, help='The number of tests allowed to fail before the remaining tests are skipped')
parser.add_argument('--pbs', nargs='?', metavar='batch_file', dest='pbs', const='generate', help='Enable launching tests via PBS. If no batch file is specified one will be created for you')
parser.add_argument('--pbs-cleanup', nargs=1, metavar='batch_file', help='Clean up the directories/files created by PBS. You must supply the same batch_file used to launch PBS.')
parser.add_argument('--pbs-project', nargs=1, default='moose', help='Identify PBS job submission to specified project')
parser.add_argument('--re', action='store', type=str, dest='reg_exp', help='Run tests that match --re=regular_expression')
parser.add_argument('--failed-tests', action='store_true', dest='failed_tests', help='Run tests that previously failed')
# Options that pass straight through to the executable
parser.add_argument('--parallel-mesh', action='store_true', dest='parallel_mesh', help='Deprecated, use --distributed-mesh instead')
parser.add_argument('--distributed-mesh', action='store_true', dest='distributed_mesh', help='Pass "--distributed-mesh" to executable')
parser.add_argument('--error', action='store_true', help='Run the tests with warnings as errors (Pass "--error" to executable)')
parser.add_argument('--error-unused', action='store_true', help='Run the tests with errors on unused parameters (Pass "--error-unused" to executable)')
# Option to use for passing unwrapped options to the executable
parser.add_argument('--cli-args', nargs='?', type=str, dest='cli_args', help='Append the following list of arguments to the command line (Encapsulate the command in quotes)')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Pass --dry-run to print commands to run, but don't actually run them")
outputgroup = parser.add_argument_group('Output Options', 'These options control the output of the test harness. The sep-files options write output to files named test_name.TEST_RESULT.txt. All file output will overwrite old files')
outputgroup.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='show the output of every test')
outputgroup.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='only show the result of every test, don\'t show test output even if it fails')
outputgroup.add_argument('--no-report', action='store_false', dest='report_skipped', help='do not report skipped tests')
outputgroup.add_argument('--show-directory', action='store_true', dest='show_directory', help='Print test directory path in out messages')
outputgroup.add_argument('-o', '--output-dir', nargs=1, metavar='directory', dest='output_dir', default='', help='Save all output files in the directory, and create it if necessary')
outputgroup.add_argument('-f', '--file', nargs=1, action='store', dest='file', help='Write verbose output of each test to FILE and quiet output to terminal')
    outputgroup.add_argument('-x', '--sep-files', action='store_true', dest='sep_files', help='Write the output of each test to a separate file. Only quiet output to terminal. This is equivalent to \'--sep-files-fail --sep-files-ok\'')
outputgroup.add_argument('--sep-files-ok', action='store_true', dest='ok_files', help='Write the output of each passed test to a separate file')
outputgroup.add_argument('-a', '--sep-files-fail', action='store_true', dest='fail_files', help='Write the output of each FAILED test to a separate file. Only quiet output to terminal.')
outputgroup.add_argument("--store-timing", action="store_true", dest="store_time", help="Store timing in the SQL database: $HOME/timingDB/timing.sqlite A parent directory (timingDB) must exist.")
outputgroup.add_argument("--testharness-unittest", action="store_true", help="Run the TestHarness unittests that test the TestHarness.")
outputgroup.add_argument("--revision", nargs=1, action="store", type=str, dest="revision", help="The current revision being tested. Required when using --store-timing.")
outputgroup.add_argument("--yaml", action="store_true", dest="yaml", help="Dump the parameters for the testers in Yaml Format")
outputgroup.add_argument("--dump", action="store_true", dest="dump", help="Dump the parameters for the testers in GetPot Format")
code = True
if self.code.decode('hex') in argv:
del argv[argv.index(self.code.decode('hex'))]
code = False
self.options = parser.parse_args(argv[1:])
self.tests = self.options.test_name
self.options.code = code
# Convert all list based options of length one to scalars
for key, value in vars(self.options).items():
if type(value) == list and len(value) == 1:
tmp_str = getattr(self.options, key)
setattr(self.options, key, value[0])
# If attempting to test only failed_tests, open the .failed_tests file and create a list object
# otherwise, open the failed_tests file object for writing (clobber).
self.test_list = []
failed_tests_file = os.path.join(os.getcwd(), '.failed_tests')
if self.options.failed_tests:
with open(failed_tests_file, 'r') as tmp_failed_tests:
self.test_list = tmp_failed_tests.read().split('\n')
else:
self.writeFailedTest = open(failed_tests_file, 'w')
self.checkAndUpdateCLArgs()
## Called after options are parsed from the command line
# Exit if options don't make any sense, print warnings if they are merely weird
def checkAndUpdateCLArgs(self):
opts = self.options
if opts.output_dir and not (opts.file or opts.sep_files or opts.fail_files or opts.ok_files):
print 'WARNING: --output-dir is specified but no output files will be saved, use -f or a --sep-files option'
if opts.group == opts.not_group:
print 'ERROR: The group and not_group options cannot specify the same group'
sys.exit(1)
if opts.store_time and not (opts.revision):
      print 'ERROR: --store-timing is specified but no --revision was given'
sys.exit(1)
if opts.store_time:
# timing returns Active Time, while store_timing returns Solve Time.
# Thus we need to turn off timing.
opts.timing = False
opts.scaling = True
if opts.valgrind_mode and (opts.parallel > 1 or opts.nthreads > 1):
print 'ERROR: --parallel and/or --threads can not be used with --valgrind'
sys.exit(1)
# Update any keys from the environment as necessary
if not self.options.method:
if os.environ.has_key('METHOD'):
self.options.method = os.environ['METHOD']
else:
self.options.method = 'opt'
if not self.options.valgrind_mode:
self.options.valgrind_mode = ''
# Update libmesh_dir to reflect arguments
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir
    # Generate a batch file if the PBS argument was supplied without a file
if opts.pbs == 'generate':
largest_serial_num = 0
for name in os.listdir('.'):
m = re.search('pbs_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
opts.pbs = "pbs_" + str(largest_serial_num+1).zfill(3)
# When running heavy tests, we'll make sure we use --no-report
if opts.heavy_tests:
self.options.report_skipped = False
def checkForRaceConditionOutputs(self, testers, dirpath):
d = DependencyResolver()
# Create a dictionary of test_names to Tester objects
# We'll use this to retrieve the Tester objects by
# name to call additional methods while determining
    # dependencies.
name_to_object = {}
for tester in testers:
name_to_object[tester.getTestName()] = tester
# Now build up our tester dependencies
for tester in testers:
# Now we need to see which dependencies are real
# We don't really care about skipped tests, heavy tests, etc.
for name in tester.getPrereqs():
if not name_to_object[name].getRunnable():
tester.setStatus('skipped dependency', tester.bucket_skip)
prereq_objects = [name_to_object[name] for name in tester.getPrereqs()]
d.insertDependency(tester, prereq_objects)
try:
concurrent_tester_sets = d.getSortedValuesSets()
for concurrent_testers in concurrent_tester_sets:
output_files_in_dir = set()
for tester in concurrent_testers:
if tester.getRunnable():
output_files = tester.getOutputFiles()
duplicate_files = output_files_in_dir.intersection(output_files)
if len(duplicate_files):
print 'Duplicate output files detected in directory:\n', dirpath, '\n\t', '\n\t'.join(duplicate_files)
self.error_code = self.error_code | 0x80
output_files_in_dir.update(output_files)
except:
# Cyclic or invalid dependency, we'll let RunParallel deal with that
      # That condition won't affect the output file check
pass
def postRun(self, specs, timing):
return
def preRun(self):
if self.options.yaml:
self.factory.printYaml("Tests")
sys.exit(0)
elif self.options.dump:
self.factory.printDump("Tests")
sys.exit(0)
if self.options.pbs_cleanup:
self.cleanPBSBatch()
sys.exit(0)
def getOptions(self):
return self.options
#################################################################################################################################
# The TestTimer TestHarness
# This TestHarness subclass finds and stores timing for individual tests. It is activated with --store-timing
#################################################################################################################################
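# A typical invocation might look like (assuming the usual run_tests wrapper script):
#   ./run_tests --store-timing --revision <git sha>
# --revision is required whenever --store-timing is used (enforced in checkAndUpdateCLArgs).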
CREATE_TABLE = """create table timing
(
app_name text,
test_name text,
revision text,
date int,
seconds real,
scale int,
load real
);"""
class TestTimer(TestHarness):
def __init__(self, argv, app_name, moose_dir):
TestHarness.__init__(self, argv, app_name, moose_dir)
try:
from sqlite3 import dbapi2 as sqlite
except:
print 'Error: --store-timing requires the sqlite3 python module.'
sys.exit(1)
self.app_name = app_name
self.db_file = self.options.dbFile
if not self.db_file:
home = os.environ['HOME']
self.db_file = os.path.join(home, 'timingDB/timing.sqlite')
if not os.path.exists(self.db_file):
print 'Warning: creating new database at default location: ' + str(self.db_file)
self.createDB(self.db_file)
else:
print 'Warning: Assuming database location ' + self.db_file
def createDB(self, fname):
from sqlite3 import dbapi2 as sqlite
print 'Creating empty database at ' + fname
con = sqlite.connect(fname)
cr = con.cursor()
cr.execute(CREATE_TABLE)
con.commit()
def preRun(self):
from sqlite3 import dbapi2 as sqlite
# Delete previous data if app_name and repo revision are found
con = sqlite.connect(self.db_file)
cr = con.cursor()
cr.execute('delete from timing where app_name = ? and revision = ?', (self.app_name, self.options.revision))
con.commit()
# After the run store the results in the database
def postRun(self, test, timing):
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(self.db_file)
cr = con.cursor()
timestamp = int(time.time())
load = os.getloadavg()[0]
# accumulate the test results
data = []
sum_time = 0
num = 0
parse_failed = False
    # We're only interested in storing scaled data
if timing != None and test['scale_refine'] != 0:
sum_time += float(timing)
num += 1
data.append( (self.app_name, test['test_name'].split('/').pop(), self.options.revision, timestamp, timing, test['scale_refine'], load) )
# Insert the data into the database
cr.executemany('insert into timing values (?,?,?,?,?,?,?)', data)
con.commit()
|
backmari/moose
|
python/TestHarness/TestHarness.py
|
Python
|
lgpl-2.1
| 56,618
|
[
"MOOSE",
"VTK"
] |
c27f7ef011b4daff8c899f3287010787b55c11013184acfc0fd90493e7676e82
|
from __future__ import division
import time
import numpy as np
np.random.seed(1234) # seed random number generator
srng_seed = np.random.randint(2**30)
from keras.models import Sequential
from keras.optimizers import SGD
from keras_extensions.logging import log_to_file
from keras_extensions.rbm import GBRBM, RBM
from keras_extensions.dbn import DBN
from keras_extensions.layers import SampleBernoulli
from keras_extensions.initializers import glorot_uniform_sigm
# configuration
input_dim = 100
hidden_dim = 200
batch_size = 10
nb_epoch = 1
lr = 0.0001 # small learning rate for GB-RBM
momentum_schedule = [(0, 0.5), (5, 0.9)] # start momentum at 0.5, then 0.9 after 5 epochs
@log_to_file('example.log')
def main():
# generate dummy dataset
nframes = 10000
dataset = np.random.normal(loc=np.zeros(input_dim), scale=np.ones(input_dim), size=(nframes, input_dim))
# standardize (in this case superfluous)
#dataset, mean, stddev = standardize(dataset)
# split into train and test portion
ntest = 1000
    X_train = dataset[:-ntest, :] # all but last 1000 samples for training
X_test = dataset[-ntest:, :] # last 1000 samples for testing
X_trainsub = dataset[:ntest, :] # subset of training data with same number of samples as testset
assert X_train.shape[0] >= X_test.shape[0], 'Train set should be at least size of test set!'
# setup model structure
print('Creating training model...')
dbn = DBN([
GBRBM(input_dim, 200, init=glorot_uniform_sigm),
RBM(200, 400, init=glorot_uniform_sigm),
RBM(400, 300, init=glorot_uniform_sigm),
RBM(300, 50, init=glorot_uniform_sigm),
RBM(50, hidden_dim, init=glorot_uniform_sigm)
])
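    # The first layer is a Gaussian-Bernoulli RBM so it can model the real-valued
    # (here Gaussian dummy) input; the remaining layers are ordinary Bernoulli RBMs.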
# setup optimizer, loss
def get_layer_loss(rbm,layer_no):
return rbm.contrastive_divergence_loss(nb_gibbs_steps=1)
def get_layer_optimizer(layer_no):
return SGD((layer_no+1)*lr, 0., decay=0.0, nesterov=False)
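    # Each layer is pre-trained greedily with a 1-step contrastive-divergence loss and its
    # own SGD optimizer; the (layer_no+1)*lr scaling is simply this example's choice.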
dbn.compile(layer_optimizer=get_layer_optimizer, layer_loss=get_layer_loss)
# do training
print('Training...')
begin_time = time.time()
#callbacks = [momentum_scheduler, rec_err_logger, free_energy_gap_logger]
dbn.fit(X_train, batch_size, nb_epoch, verbose=1, shuffle=False)
end_time = time.time()
print('Training took %f minutes' % ((end_time - begin_time)/60.0))
# save model parameters
print('Saving model...')
dbn.save_weights('example.hdf5', overwrite=True)
# load model parameters
print('Loading model...')
dbn.load_weights('example.hdf5')
# generate hidden features from input data
print('Creating inference model...')
    F = dbn.get_forward_inference_layers()
    B = dbn.get_backward_inference_layers()
inference_model = Sequential()
for f in F:
inference_model.add(f)
inference_model.add(SampleBernoulli(mode='random'))
for b in B[:-1]:
inference_model.add(b)
inference_model.add(SampleBernoulli(mode='random'))
# last layer is a gaussian layer
inference_model.add(B[-1])
print('Compiling Theano graph...')
opt = SGD()
inference_model.compile(opt, loss='mean_squared_error') # XXX: optimizer and loss are not used!
print('Doing inference...')
h = inference_model.predict(dataset)
print(h)
print('Done!')
if __name__ == '__main__':
main()
|
wuaalb/keras_extensions
|
examples/dbn_example.py
|
Python
|
mit
| 3,388
|
[
"Gaussian"
] |
90c0252c3aa6181a7bab5931b207f6a6d7f71ecce93439a1b6148b74708348c9
|
#!/usr/bin/env python
"""
Uninstallation of a DIRAC component
Usage:
dirac-uninstall-component [options] ... System Component|System/Component
Arguments:
System: Name of the DIRAC system (ie: WorkloadManagement)
Component: Name of the DIRAC component (ie: Matcher)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import socket
from DIRAC import exit as DIRACexit
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.PromptUser import promptUser
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
__RCSID__ = "$Id$"
force = False
def setForce(opVal):
global force
force = True
return S_OK()
@DIRACScript()
def main():
global force
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
gComponentInstaller.exitOnError = True
Script.registerSwitch("f", "force", "Forces the removal of the logs", setForce)
Script.parseCommandLine()
args = Script.getPositionalArgs()
if len(args) == 1:
args = args[0].split('/')
if len(args) < 2:
Script.showHelp(exitCode=1)
system = args[0]
component = args[1]
monitoringClient = ComponentMonitoringClient()
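  # Look up installations of this component for the given system on this host that
  # have not yet been uninstalled (UnInstallationTime is still None).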
result = monitoringClient.getInstallations({'Instance': component, 'UnInstallationTime': None},
{'System': system},
{'HostName': socket.getfqdn()}, True)
if not result['OK']:
gLogger.error(result['Message'])
DIRACexit(1)
if len(result['Value']) < 1:
gLogger.warn('Given component does not exist')
DIRACexit(1)
if len(result['Value']) > 1:
gLogger.error('Too many components match')
DIRACexit(1)
removeLogs = False
if force:
removeLogs = True
else:
if result['Value'][0]['Component']['Type'] in gComponentInstaller.componentTypes:
result = promptUser('Remove logs?', ['y', 'n'], 'n')
if result['OK']:
removeLogs = result['Value'] == 'y'
else:
gLogger.error(result['Message'])
DIRACexit(1)
result = gComponentInstaller.uninstallComponent(system, component, removeLogs)
if not result['OK']:
gLogger.error(result['Message'])
DIRACexit(1)
result = MonitoringUtilities.monitorUninstallation(system, component)
if not result['OK']:
gLogger.error(result['Message'])
DIRACexit(1)
gLogger.notice('Successfully uninstalled component %s/%s' % (system, component))
DIRACexit()
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_uninstall_component.py
|
Python
|
gpl-3.0
| 2,702
|
[
"DIRAC"
] |
30ece9e11d98ec3273ee52bacfc0be7efb5f2401e1cc2401a53018561ce04b14
|
#!/usr/bin/env python
# Copyright (c) 2014, Jelmer Tiete <jelmer@tiete.be>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Implementation based on stm32loader by Ivan A-R <ivan@tuxotronic.org>
# Serial boot loader over UART for CC2538
# Based on the info found in TI's swru333a.pdf (spma029.pdf)
#
# Bootloader only starts if no valid image is found or if boot loader
# backdoor is enabled.
# Make sure you don't lock yourself out!! (enable backdoor in your firmware)
# More info at https://github.com/JelmerT/cc2538-bsl
from __future__ import print_function
from subprocess import Popen, PIPE
import sys, getopt
import glob
import time
import tempfile
import os
import subprocess
import struct
import binascii
import platform
platform = platform.system()
if platform == 'Windows':
if (sys.version_info > (3, 0)):
import winreg
else:
import _winreg as winreg
#version
VERSION_STRING = "1.1"
# Verbose level
QUIET = 5
# DTR/RTS levels
HIGH = True
LOW = False
# Check which version of Python is running
PY3 = sys.version_info >= (3,0)
try:
import serial
except ImportError:
print('{} requires the Python serial library'.format(sys.argv[0]))
print('Please install it with one of the following:')
print('')
if PY3:
print(' Ubuntu: sudo apt-get install python3-serial')
print(' Mac: sudo port install py34-serial')
else:
print(' Ubuntu: sudo apt-get install python-serial')
print(' Mac: sudo port install py-serial')
sys.exit(1)
def mdebug(level, message, attr='\n'):
if QUIET >= level:
print(message, end=attr, file=sys.stderr)
# Maps chip IDs (obtained via the Get Chip ID command) to human-readable names
CHIP_ID_STRS = {0xb964: 'CC2538'}
RETURN_CMD_STRS = {0x40: 'Success',
0x41: 'Unknown command',
0x42: 'Invalid command',
0x43: 'Invalid address',
0x44: 'Flash fail'
}
COMMAND_RET_SUCCESS = 0x40
COMMAND_RET_UNKNOWN_CMD = 0x41
COMMAND_RET_INVALID_CMD = 0x42
COMMAND_RET_INVALID_ADR = 0x43
COMMAND_RET_FLASH_FAIL = 0x44
ADDR_IEEE_ADDRESS_SECONDARY = 0x0027ffcc
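# Flash address where the secondary 64-bit IEEE address is written when --ieee-address is given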
class CmdException(Exception):
pass
class CommandInterface(object):
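    # bsl_start/bsl_stop toggle the DTR/RTS modem lines; on boards where these lines are
    # wired to the chip's reset and boot-select pins this drops the target into its ROM
    # boot loader (only used when --bsl is passed on the command line).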
def bsl_start(self, ser):
ser.setRTS(HIGH)
ser.setDTR(HIGH)
time.sleep(0.1)
ser.setRTS(HIGH)
time.sleep(0.1)
ser.setDTR(LOW)
def bsl_stop(self, ser):
ser.setDTR(HIGH)
time.sleep(0.1)
ser.setRTS(LOW)
ser.setDTR(LOW)
def open(self, aport='/dev/tty.usbserial-000013FAB', abaudrate=500000, bsl=False):
self.sp = serial.Serial(
port=aport,
baudrate=abaudrate, # baudrate
bytesize=8, # number of databits
parity=serial.PARITY_NONE,
stopbits=1,
xonxoff=0, # enable software flow control
rtscts=0, # disable RTS/CTS flow control
timeout=0.5 # set a timeout value, None for waiting forever
)
if (bsl == True):
self.bsl_start(self.sp)
def close(self, bsl=False):
if (bsl == True):
self.bsl_stop(self.sp)
self.sp.close()
def _wait_for_ack(self, info="", timeout=0):
stop = time.time() + timeout
got = None
while not got:
got = self._read(2)
if time.time() > stop:
break
if not got:
mdebug(10, "No response to %s" % info)
return 0
        # wait for ack
ask = got[1]
if ask == 0xCC:
# ACK
return 1
elif ask == 0x33:
# NACK
mdebug(10, "Target replied with a NACK during %s" % info)
return 0
# Unknown response
mdebug(10, "Unrecognised response 0x%x to %s" % (ask, info))
return 0
def _encode_addr(self, addr):
byte3 = (addr >> 0) & 0xFF
byte2 = (addr >> 8) & 0xFF
byte1 = (addr >> 16) & 0xFF
byte0 = (addr >> 24) & 0xFF
if PY3:
return bytes([byte0, byte1, byte2, byte3])
else:
return (chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3))
def _decode_addr(self, byte0, byte1, byte2, byte3):
return ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | (byte0 << 0))
def _calc_checks(self, cmd, addr, size):
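        # The protocol checksum is the low byte of (command byte + all address bytes +
        # all size bytes), per the serial boot loader description in TI's swru333a.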
return ((sum(bytearray(self._encode_addr(addr)))
+sum(bytearray(self._encode_addr(size)))
+cmd)
&0xFF)
def _write(self, data):
if PY3:
if type(data) == int:
self.sp.write(bytes([data]))
elif type(data) == bytes or type(data) == bytearray:
self.sp.write(data)
else:
if type(data) == int:
self.sp.write(chr(data))
else:
self.sp.write(data)
def _read(self, length):
got = self.sp.read(length)
if PY3:
return got
else:
return [ord(x) for x in got]
def sendAck(self):
        self._write(0x00)
        self._write(0xCC)
return
def sendNAck(self):
        self._write(0x00)
        self._write(0x33)
return
def receivePacket(self):
# stop = time.time() + 5
# got = None
# while not got:
got = self._read(2)
# if time.time() > stop:
# break
# if not got:
# raise CmdException("No response to %s" % info)
size = got[0] #rcv size
chks = got[1] #rcv checksum
data = self._read(size-2) # rcv data
mdebug(10, "*** received %x bytes" % size)
if chks == sum(data)&0xFF:
self.sendAck()
return data
else:
self.sendNAck()
#TODO: retry receiving!
raise CmdException("Received packet checksum error")
return 0
def sendSynch(self):
cmd = 0x55
self.sp.flushInput() #flush serial input buffer for first ACK reception
mdebug(10, "*** sending synch sequence")
self._write(cmd) # send U
self._write(cmd) # send U
return self._wait_for_ack("Synch (0x55 0x55)")
def checkLastCmd(self):
stat = self.cmdGetStatus()
if not (stat):
raise CmdException("No response from target on status request. (Did you disable the bootloader?)")
if stat[0] == COMMAND_RET_SUCCESS:
mdebug(10, "Command Successful")
return 1
else:
            stat_str = RETURN_CMD_STRS.get(stat[0], None)
            if stat_str is None:
                mdebug(0, 'Warning: unrecognized status returned 0x%x' % stat[0])
            else:
                mdebug(0, "Target returned: 0x%x, %s" % (stat[0], stat_str))
return 0
def cmdPing(self):
cmd = 0x20
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** Ping command (0x20)")
if self._wait_for_ack("Ping (0x20)"):
return self.checkLastCmd()
def cmdReset(self):
cmd = 0x25
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** Reset command (0x25)")
if self._wait_for_ack("Reset (0x25)"):
return 1
def cmdGetChipId(self):
cmd = 0x28
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** GetChipId command (0x28)")
if self._wait_for_ack("Get ChipID (0x28)"):
version = self.receivePacket() # 4 byte answ, the 2 LSB hold chip ID
if self.checkLastCmd():
assert len(version) == 4, "Unreasonable chip id: %s" % repr(version)
chip_id = (version[2] << 8) | version[3]
return chip_id
else:
raise CmdException("GetChipID (0x28) failed")
def cmdGetStatus(self):
cmd = 0x23
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** GetStatus command (0x23)")
if self._wait_for_ack("Get Status (0x23)"):
stat = self.receivePacket()
return stat
def cmdSetXOsc(self):
cmd = 0x29
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** SetXOsc command (0x29)")
if self._wait_for_ack("SetXOsc (0x29)"):
return 1
            # Note: the UART speed needs to be changed after switching to the external oscillator
def cmdRun(self, addr):
cmd=0x22
lng=7
self._write(lng) # send length
self._write(self._calc_checks(cmd,addr,0)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
mdebug(10, "*** Run command(0x22)")
return 1
def cmdEraseMemory(self, addr, size):
cmd=0x26
lng=11
self._write(lng) # send length
self._write(self._calc_checks(cmd,addr,size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
mdebug(10, "*** Erase command(0x26)")
if self._wait_for_ack("Erase memory (0x26)",10):
return self.checkLastCmd()
def cmdCRC32(self, addr, size):
cmd=0x27
lng=11
self._write(lng) # send length
self._write(self._calc_checks(cmd,addr,size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
mdebug(10, "*** CRC32 command(0x27)")
if self._wait_for_ack("Get CRC32 (0x27)",1):
crc=self.receivePacket()
if self.checkLastCmd():
return self._decode_addr(crc[3],crc[2],crc[1],crc[0])
def cmdDownload(self, addr, size):
cmd=0x21
lng=11
if (size % 4) != 0: # check for invalid data lengths
raise Exception('Invalid data size: %i. Size must be a multiple of 4.' % size)
self._write(lng) # send length
self._write(self._calc_checks(cmd,addr,size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
mdebug(10, "*** Download command (0x21)")
if self._wait_for_ack("Download (0x21)",2):
return self.checkLastCmd()
def cmdSendData(self, data):
cmd=0x24
lng=len(data)+3
# TODO: check total size of data!! max 252 bytes!
self._write(lng) # send size
self._write((sum(bytearray(data))+cmd)&0xFF) # send checksum
self._write(cmd) # send cmd
self._write(bytearray(data)) # send data
mdebug(10, "*** Send Data (0x24)")
if self._wait_for_ack("Send data (0x24)",10):
return self.checkLastCmd()
def cmdMemRead(self, addr): # untested
cmd=0x2A
lng=8
self._write(lng) # send length
self._write(self._calc_checks(cmd,addr,4)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(4) # send width, 4 bytes
mdebug(10, "*** Mem Read (0x2A)")
if self._wait_for_ack("Mem Read (0x2A)",1):
data = self.receivePacket()
if self.checkLastCmd():
return data # self._decode_addr(ord(data[3]),ord(data[2]),ord(data[1]),ord(data[0]))
def cmdMemWrite(self, addr, data, width): # untested
# TODO: check width for 1 or 4 and data size
cmd=0x2B
lng=10
self._write(lng) # send length
self._write(self._calc_checks(cmd,addr,0)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(bytearray(data)) # send data
self._write(width) # send width, 4 bytes
mdebug(10, "*** Mem write (0x2B)")
if self._wait_for_ack("Mem Write (0x2B)",2):
            return self.checkLastCmd()
# Complex commands section
def writeMemory(self, addr, data):
lng = len(data)
trsf_size = 248 # amount of data bytes transferred per packet (theory: max 252 + 3)
if PY3:
empty_packet = b'\xff'*trsf_size # empty packet (filled with 0xFF)
else:
empty_packet = [255]*trsf_size # empty packet (filled with 0xFF)
# Boot loader enable check
# TODO: implement check for all chip sizes & take into account partial firmware uploads
if (lng == 524288): #check if file is for 512K model
if not ((data[524247] & (1 << 4)) >> 4): #check the boot loader enable bit (only for 512K model)
if not query_yes_no("The boot loader backdoor is not enabled "\
"in the firmware you are about to write to the target. "\
"You will NOT be able to reprogram the target using this tool if you continue! "\
"Do you want to continue?","no"):
raise Exception('Aborted by user.')
mdebug(5, "Writing %(lng)d bytes starting at address 0x%(addr)X" %
{ 'lng': lng, 'addr': addr})
offs = 0
addr_set = 0
        while lng > trsf_size: # loop while more than one full packet of data remains
if data[offs:offs+trsf_size] != empty_packet: #skip packets filled with 0xFF
if addr_set != 1:
self.cmdDownload(addr,lng) #set starting address if not set
addr_set = 1
mdebug(5, " Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': trsf_size}, '\r')
sys.stdout.flush()
self.cmdSendData(data[offs:offs+trsf_size]) # send next data packet
else: # skipped packet, address needs to be set
addr_set = 0
offs = offs + trsf_size
addr = addr + trsf_size
lng = lng - trsf_size
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': lng}, '\r')
self.cmdDownload(addr,lng)
return self.cmdSendData(data[offs:offs+lng]) # send last data packet
def query_yes_no(question, default="yes"):
valid = {"yes":True, "y":True, "ye":True,
"no":False, "n":False}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
if PY3:
choice = input().lower()
else:
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
# Convert the entered IEEE address into an integer
def parse_ieee_address (inaddr):
try:
return int(inaddr, 16)
except ValueError:
# inaddr is not a hex string, look for other formats
        if ':' in inaddr:
            bytes = inaddr.split(':')
        elif '-' in inaddr:
            bytes = inaddr.split('-')
        else:
            raise ValueError("Supplied IEEE address is not a hex string or a ':'/'-' separated byte list")
        if len(bytes) != 8:
raise ValueError("Supplied IEEE address does not contain 8 bytes")
addr = 0
for i,b in zip(range(8), bytes):
try:
addr += int(b, 16) << (56-(i*8))
except ValueError:
raise ValueError("IEEE address contains invalid bytes")
return addr
def print_version():
# Get the version using "git describe".
try:
p = Popen(['git', 'describe', '--tags', '--match', '[0-9]*'],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readlines()[0]
version = line.strip()
except:
# We're not in a git repo, or git failed, use fixed version string.
version = VERSION_STRING
print('%s %s' % (sys.argv[0], version))
def usage():
print("""Usage: %s [-hqVewvr] [-l length] [-p port] [-b baud] [-a addr] [-i addr] [file.bin]
-h This help
-q Quiet
-V Verbose
-e Erase (full)
-w Write
-v Verify (CRC32 check)
-r Read
-l length Length of read
-p port Serial port (default: first USB-like port in /dev)
-b baud Baud speed (default: 500000)
-a addr Target address
-i, --ieee-address addr Set the secondary 64 bit IEEE address
--bsl Use the DTR/RTS lines to trigger the bsl mode
--version Print script version
Examples:
./%s -e -w -v example/main.bin
./%s -e -w -v --ieee-address 00:12:4b:aa:bb:cc:dd:ee example/main.bin
""" % (sys.argv[0],sys.argv[0],sys.argv[0]))
def read(filename):
"""Read the file to be programmed and turn it into a binary"""
with open(filename, 'rb') as f:
bytes = f.read()
if PY3:
return bytes
else:
return [ord(x) for x in bytes]
if __name__ == "__main__":
conf = {
'port': 'auto',
'baud': 500000,
'force_speed' : 0,
'address': 0x00200000,
'erase': 0,
'write': 0,
'verify': 0,
'read': 0,
'len': 0x80000,
'fname':'',
'ieee_address': 0,
'bsl': False
}
# http://www.python.org/doc/2.5.2/lib/module-getopt.html
try:
        opts, args = getopt.getopt(sys.argv[1:], "hqVewvrp:b:a:l:i:", ['ieee-address=', 'version', 'bsl'])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == '-V':
QUIET = 10
elif o == '-q':
QUIET = 0
elif o == '-h':
usage()
sys.exit(0)
elif o == '-e':
conf['erase'] = 1
elif o == '-w':
conf['write'] = 1
elif o == '-v':
conf['verify'] = 1
elif o == '-r':
conf['read'] = 1
elif o == '-p':
conf['port'] = a
elif o == '-b':
conf['baud'] = eval(a)
conf['force_speed'] = 1
elif o == '-a':
conf['address'] = eval(a)
elif o == '-l':
conf['len'] = eval(a)
elif o == '-i' or o == '--ieee-address':
conf['ieee_address'] = str(a)
elif o == '--bsl':
conf['bsl'] = True
elif o == '--version':
print_version()
sys.exit(0)
else:
assert False, "Unhandled option"
try:
# Sanity checks
if conf['write'] or conf['read'] or conf['verify']: # check for input/output file
try:
args[0]
except:
raise Exception('No file path given.')
if conf['write'] and conf['read']:
if not query_yes_no("You are reading and writing to the same file. This will overwrite your input file. "\
"Do you want to continue?","no"):
raise Exception('Aborted by user.')
if conf['erase'] and conf['read'] and not conf['write']:
if not query_yes_no("You are about to erase your target before reading. "\
"Do you want to continue?","no"):
raise Exception('Aborted by user.')
if conf['read'] and not conf['write'] and conf['verify']:
raise Exception('Verify after read not implemented.')
# Try and find the port automatically
if conf['port'] == 'auto':
ports = []
# Get a list of all USB-like names
if platform == 'Windows':
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'HARDWARE\\DEVICEMAP\\SERIALCOMM')
for i in range(winreg.QueryInfoKey(key)[1]):
try:
val = winreg.EnumValue(key,i)
if val[0].find('VCP') > -1:
ports.append(str(val[1]))
except:
pass
else:
for name in ['tty.usbserial', 'ttyUSB', 'tty.usbmodem']:
ports.extend(glob.glob('/dev/%s*' % name))
ports = sorted(ports)
if ports:
# Found something - take it
conf['port'] = ports[0]
else:
raise Exception('No serial port found.')
cmd = CommandInterface()
cmd.open(conf['port'], conf['baud'], bsl=conf['bsl'])
mdebug(5, "Opening port %(port)s, baud %(baud)d" % {'port':conf['port'],
'baud':conf['baud']})
if conf['write'] or conf['verify']:
mdebug(5, "Reading data from %s" % args[0])
data = read(args[0])
mdebug(5, "Connecting to target...")
if not cmd.sendSynch():
raise CmdException("Can't connect to target. Ensure boot loader is started. (no answer on synch sequence)")
if conf['force_speed'] != 1:
if cmd.cmdSetXOsc(): #switch to external clock source
cmd.close(bsl=conf['bsl'])
conf['baud']=1000000
cmd.open(conf['port'], conf['baud'], bsl=conf['bsl'])
mdebug(6, "Opening port %(port)s, baud %(baud)d" % {'port':conf['port'],'baud':conf['baud']})
mdebug(6, "Reconnecting to target at higher speed...")
if (cmd.sendSynch()!=1):
raise CmdException("Can't connect to target after clock source switch. (Check external crystal)")
else:
raise CmdException("Can't switch target to external clock source. (Try forcing speed)")
# if (cmd.cmdPing() != 1):
# raise CmdException("Can't connect to target. Ensure boot loader is started. (no answer on ping command)")
chip_id = cmd.cmdGetChipId()
chip_id_str = CHIP_ID_STRS.get(chip_id, None)
if chip_id_str is None:
mdebug(0, 'Warning: unrecognized chip ID 0x%x' % chip_id)
else:
mdebug(5, " Target id 0x%x, %s" % (chip_id, chip_id_str))
if conf['erase']:
# we only do full erase for now (CC2538)
address = 0x00200000 #flash start addr for cc2538
size = 0x80000 #total flash size cc2538
mdebug(5, "Erasing %s bytes starting at address 0x%x" % (size, address))
if cmd.cmdEraseMemory(address, size):
mdebug(5, " Erase done")
else:
raise CmdException("Erase failed")
if conf['write']:
# TODO: check if boot loader back-door is open, need to read flash size first to get address
if cmd.writeMemory(conf['address'], data):
mdebug(5, " Write done ")
else:
raise CmdException("Write failed ")
if conf['verify']:
mdebug(5,"Verifying by comparing CRC32 calculations.")
crc_local = (binascii.crc32(bytearray(data))& 0xffffffff)
crc_target = cmd.cmdCRC32(conf['address'],len(data)) #CRC of target will change according to length input file
if crc_local == crc_target:
mdebug(5, " Verified (match: 0x%08x)" % crc_local)
else:
cmd.cmdReset()
raise Exception("NO CRC32 match: Local = 0x%x, Target = 0x%x" % (crc_local,crc_target))
if conf['ieee_address'] != 0:
ieee_addr = parse_ieee_address(conf['ieee_address'])
if PY3:
mdebug(5, "Setting IEEE address to %s" % (':'.join(['%02x' % b for b in struct.pack('>Q', ieee_addr)])))
ieee_addr_bytes = struct.pack('<Q', ieee_addr)
else:
mdebug(5, "Setting IEEE address to %s" % (':'.join(['%02x' % ord(b) for b in struct.pack('>Q', ieee_addr)])))
ieee_addr_bytes = [ord(b) for b in struct.pack('<Q', ieee_addr)]
if cmd.writeMemory(ADDR_IEEE_ADDRESS_SECONDARY, ieee_addr_bytes):
mdebug(5, " Set address done ")
else:
raise CmdException("Set address failed ")
if conf['read']:
length = conf['len']
if length < 4: # reading 4 bytes at a time
length = 4
else:
length = length + (length % 4)
mdebug(5, "Reading %s bytes starting at address 0x%x" % (length, conf['address']))
            # truncate any previous output file and stream the words into it in binary mode
            with open(args[0], 'wb') as f:
                for i in range(0, length // 4):
                    rdata = cmd.cmdMemRead(conf['address']+(i*4)) # reading 4 bytes at a time
                    mdebug(5, " 0x%x: 0x%02x%02x%02x%02x" % (conf['address']+(i*4), rdata[3], rdata[2], rdata[1], rdata[0]), '\r')
                    f.write(bytearray(reversed(rdata)))
mdebug(5, " Read done ")
cmd.cmdReset()
cmd.close(bsl=conf['bsl'])
except Exception as err:
exit('ERROR: %s' % str(err))
|
ciolo/Sniffer-OpenMote
|
OpenMoteFirmware/tools/openmote-bsl/cc2538-bsl/cc2538-bsl.py
|
Python
|
gpl-2.0
| 27,583
|
[
"CRYSTAL"
] |
8c6101c7e84dc3866c36bca13e1f8f60bbdbfb3ead02d61a24ea6e9320c1cd99
|
from FeatureBuilder import FeatureBuilder
# Amino acids from http://www.bio.davidson.edu/courses/genomics/jmol/aatable.html
# Each entry: (amino acid name, three-letter code, one-letter code, polarity, charge)
subcomponent = set(["region", "promoter", "upstream", "fragment", "site",
"sequence", "segment", "repeat", "repeat", "element",
"duplication", "exon", "downstream", "terminus", "motif",
"frame", "carboxy-terminus", "domain", "subunit", "codon",
"promoter", "enhancer", "locus", "ltr", "helix-loop-helix",
"zinc-finger", "portion", "residue", "box", "intron"])
supergroup = set(["complex", "family", "octamer", "microtubule"])
aminoAcids = [
#nonpolar (hydrophobic)
("glycine", "gly", "g", "nonpolar", "neutral"),
("alanine", "ala", "a", "nonpolar", "neutral"),
("valine", "val", "v", "nonpolar", "neutral"),
("leucine", "leu", "l", "nonpolar", "neutral"),
("isoleucine", "ile", "i", "nonpolar", "neutral"),
("methionine", "met", "m", "nonpolar", "neutral"),
("phenylalanine", "phe", "f", "nonpolar", "neutral"),
("tryptophan", "trp", "w", "nonpolar", "neutral"),
("proline", "pro", "p", "nonpolar", "neutral"),
#polar (hydrophilic)
("serine", "ser", "s", "hydrophilic", "neutral"),
("threonine", "thr", "t", "hydrophilic", "neutral"),
("cysteine", "cys", "c", "hydrophilic", "neutral"),
("tyrosine", "tyr", "y", "hydrophilic", "neutral"),
("asparagine", "asn", "n", "hydrophilic", "neutral"),
("glutamine", "gln", "q", "hydrophilic", "neutral"),
#electrically charged (negative and hydrophilic)
("aspartic acid", "asp", "d", "hydrophilic", "negative"),
("glutamic acid", "glu", "e", "hydrophilic", "negative"),
#electrically charged (positive and hydrophilic)
("lysine", "lys", "k", "hydrophilic", "positive"),
("arginine", "arg", "r", "hydrophilic", "positive"),
("histidine", "his", "h", "hydrophilic", "positive")]
class RELFeatureBuilder(FeatureBuilder):
def __init__(self, featureSet):
FeatureBuilder.__init__(self, featureSet)
#self.noAnnType = False
#self.edgeTypesForFeatures = []
#self.useNonNameEntities = False
def findAminoAcid(self, string):
global aminoAcids
string = string.lower()
for aa in aminoAcids:
word = string.find(aa[0])
if word != -1:
return word, aa
else:
tlc = string.find(aa[1]) # three letter code
if tlc != -1:
# Three letter code must not be a part of a word (where it could be just a substring)
if (tlc == 0 or not string[tlc-1].isalpha()) and (tlc + 3 >= len(string) or not string[tlc + 3].isalpha()):
return tlc, aa
return -1, None
def buildAllFeatures(self, tokens, tokenIndex):
token = tokens[tokenIndex]
tokText = token.get("text").lower()
self.buildAminoAcidFeatures(tokText)
self.buildDNAFeatures(tokText)
self.buildSubstringFeatures(tokens, tokenIndex)
self.buildRangeFeatures(tokens, tokenIndex)
self.buildKnownWordFeatures(tokText)
def buildAminoAcidFeatures(self, string):
index, aa = self.findAminoAcid(string)
if aa != None:
self.setFeature("RELaminoacid_string")
self.setFeature("RELaminoacid_" + aa[1])
def findSubstring(self, string, substring, tag=None):
if tag == None:
tag = substring
index = string.find(substring)
if index != -1:
self.setFeature("RELsubstring_"+tag)
if index + len(substring) == len(string):
self.setFeature("RELsubstring_terminal_"+tag)
else:
self.setFeature("RELsubstring_nonterminal_"+tag)
def buildSubstringFeatures(self, tokens, tokenIndex):
string = ""
for t in tokens[tokenIndex-6:tokenIndex]:
# TODO the actual token does not seem to be included
string += t.get("text")
string = string.lower().replace("-", "").replace(" ", "")
# nfkb
self.findSubstring(string, "nfkappab", "nfkb")
self.findSubstring(string, "nfkb")
self.findSubstring(string, "nfkappab", "complex")
self.findSubstring(string, "nfkb", "complex")
# kappa-b
self.findSubstring(string, "kappab")
# ap-1
self.findSubstring(string, "ap1")
self.findSubstring(string, "activatingprotein1", "ap1")
self.findSubstring(string, "ap1", "complex")
self.findSubstring(string, "activatingprotein1", "complex")
# proteasome
self.findSubstring(string, "proteasome")
self.findSubstring(string, "proteasome", "complex")
# base pairs
self.findSubstring(string, "bp", "bp")
self.findSubstring(string, "basepair", "bp")
# primes
self.findSubstring(string, "5'", "5prime")
self.findSubstring(string, "3'", "3prime")
def buildDNAFeatures(self, string):
for letter in string:
if letter not in ["a", "g", "t", "c"]:
return
self.setFeature("RELDNA_sequence")
def buildRangeFeatures(self, tokens, tokenIndex):
if tokenIndex > 1:
if tokens[tokenIndex-1].get("text").lower() in ["to", "and", "-"]:
t1Text = tokens[tokenIndex-2].get("text")
if t1Text[0] == "-" or t1Text[0] == "+":
t1Text = t1Text[1:]
t2Text = tokens[tokenIndex].get("text")
if t2Text[0] == "-" or t2Text[0] == "+":
t2Text = t2Text[1:]
if t1Text.isdigit() and t2Text.isdigit():
self.setFeature("RELnumeric_range")
def buildKnownWordFeatures(self, string):
global subcomponent, supergroup
string = string.lower()
if string[-1] == "s":
singular = string[:-1]
else:
singular = None
if string in subcomponent or singular in subcomponent:
self.setFeature("RELknown_subcomponent")
if string in supergroup or singular in supergroup:
self.setFeature("RELknown_supergroup")
|
ashishbaghudana/mthesis-ashish
|
resources/tees/ExampleBuilders/FeatureBuilders/RELFeatureBuilder.py
|
Python
|
mit
| 6,349
|
[
"Jmol"
] |
c89a72445bd61b2be17418835e600454aeed450e1b09cfc827e56d5be44c398a
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_raises, assert_allclose,
assert_equal, assert_, assert_array_less)
from scipy._lib._numpy_compat import suppress_warnings
from scipy import signal, fftpack
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('hanning', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('slepian', (2,)),
('cosine', ()),
('hann', ()),
('exponential', ()),
('tukey', (0.5,)),
]
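# (window name, extra shape parameters) pairs; presumably consumed by the generic window
# checks further down in this test module.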
class TestBartHann(object):
def test_basic(self):
assert_allclose(signal.barthann(6, sym=True),
[0, 0.35857354213752, 0.8794264578624801,
0.8794264578624801, 0.3585735421375199, 0])
assert_allclose(signal.barthann(7),
[0, 0.27, 0.73, 1.0, 0.73, 0.27, 0])
assert_allclose(signal.barthann(6, False),
[0, 0.27, 0.73, 1.0, 0.73, 0.27])
class TestBartlett(object):
def test_basic(self):
assert_allclose(signal.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
assert_allclose(signal.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
assert_allclose(signal.bartlett(6, False),
[0, 1/3, 2/3, 1.0, 2/3, 1/3])
class TestBlackman(object):
def test_basic(self):
assert_allclose(signal.blackman(6, sym=False),
[0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)
assert_allclose(signal.blackman(7, sym=False),
[0, 0.09045342435412804, 0.4591829575459636,
0.9203636180999081, 0.9203636180999081,
0.4591829575459636, 0.09045342435412804], atol=1e-8)
assert_allclose(signal.blackman(6),
[0, 0.2007701432625305, 0.8492298567374694,
0.8492298567374694, 0.2007701432625305, 0],
atol=1e-14)
assert_allclose(signal.blackman(7, True),
[0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14)
class TestBlackmanHarris(object):
def test_basic(self):
assert_allclose(signal.blackmanharris(6, False),
[6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
assert_allclose(signal.blackmanharris(7, sym=False),
[6.0e-05, 0.03339172347815117, 0.332833504298565,
0.8893697722232837, 0.8893697722232838,
0.3328335042985652, 0.03339172347815122])
assert_allclose(signal.blackmanharris(6),
[6.0e-05, 0.1030114893456638, 0.7938335106543362,
0.7938335106543364, 0.1030114893456638, 6.0e-05])
assert_allclose(signal.blackmanharris(7, sym=True),
[6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,
6.0e-05])
class TestBohman(object):
def test_basic(self):
assert_allclose(signal.bohman(6),
[0, 0.1791238937062839, 0.8343114522576858,
0.8343114522576858, 0.1791238937062838, 0])
assert_allclose(signal.bohman(7, sym=True),
[0, 0.1089977810442293, 0.6089977810442293, 1.0,
0.6089977810442295, 0.1089977810442293, 0])
assert_allclose(signal.bohman(6, False),
[0, 0.1089977810442293, 0.6089977810442293, 1.0,
0.6089977810442295, 0.1089977810442293])
class TestBoxcar(object):
def test_basic(self):
assert_allclose(signal.boxcar(6), [1, 1, 1, 1, 1, 1])
assert_allclose(signal.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
assert_allclose(signal.boxcar(6, False), [1, 1, 1, 1, 1, 1])
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938])
cheb_even_true = array([0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894])
class TestChebWin(object):
def test_basic(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
assert_allclose(signal.chebwin(6, 100),
[0.1046401879356917, 0.5075781475823447, 1.0, 1.0,
0.5075781475823447, 0.1046401879356917])
assert_allclose(signal.chebwin(7, 100),
[0.05650405062850233, 0.316608530648474,
0.7601208123539079, 1.0, 0.7601208123539079,
0.316608530648474, 0.05650405062850233])
assert_allclose(signal.chebwin(6, 10),
[1.0, 0.6071201674458373, 0.6808391469897297,
0.6808391469897297, 0.6071201674458373, 1.0])
assert_allclose(signal.chebwin(7, 10),
[1.0, 0.5190521247588651, 0.5864059018130382,
0.6101519801307441, 0.5864059018130382,
0.5190521247588651, 1.0])
assert_allclose(signal.chebwin(6, 10, False),
[1.0, 0.5190521247588651, 0.5864059018130382,
0.6101519801307441, 0.5864059018130382,
0.5190521247588651])
def test_cheb_odd_high_attenuation(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_odd = signal.chebwin(53, at=-40)
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
def test_cheb_even_high_attenuation(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_even = signal.chebwin(54, at=40)
assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
def test_cheb_odd_low_attenuation(self):
cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
0.610151, 0.586405, 0.519052,
1.000000])
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_odd = signal.chebwin(7, at=10)
assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_cheb_even_low_attenuation(self):
cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
0.541338, 0.541338, 0.51027,
0.451924, 1.000000])
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_even = signal.chebwin(8, at=-10)
assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
exponential_data = {
(4, None, 0.2, False):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03]),
(4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
0.0820849986238988, 0.00055308437014783]),
(4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
0.60653065971263342, 0.22313016014842982]),
(4, 2, 0.2, False):
array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03]),
(4, 2, 0.2, True): None,
(4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, 2, 1.0, True): None,
(5, None, 0.2, True):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 0.2, True): None,
(5, 2, 1.0, True): None
}
def test_exponential():
for k, v in exponential_data.items():
if v is None:
assert_raises(ValueError, signal.exponential, *k)
else:
win = signal.exponential(*k)
assert_allclose(win, v, rtol=1e-14)
class TestFlatTop(object):
def test_basic(self):
assert_allclose(signal.flattop(6, sym=False),
[-0.000421051, -0.051263156, 0.19821053, 1.0,
0.19821053, -0.051263156])
assert_allclose(signal.flattop(7, sym=False),
[-0.000421051, -0.03684078115492348,
0.01070371671615342, 0.7808739149387698,
0.7808739149387698, 0.01070371671615342,
-0.03684078115492348])
assert_allclose(signal.flattop(6),
[-0.000421051, -0.0677142520762119, 0.6068721525762117,
0.6068721525762117, -0.0677142520762119,
-0.000421051])
assert_allclose(signal.flattop(7, True),
[-0.000421051, -0.051263156, 0.19821053, 1.0,
0.19821053, -0.051263156, -0.000421051])
class TestGaussian(object):
def test_basic(self):
assert_allclose(signal.gaussian(6, 1.0),
[0.04393693362340742, 0.3246524673583497,
0.8824969025845955, 0.8824969025845955,
0.3246524673583497, 0.04393693362340742])
assert_allclose(signal.gaussian(7, 1.2),
[0.04393693362340742, 0.2493522087772962,
0.7066482778577162, 1.0, 0.7066482778577162,
0.2493522087772962, 0.04393693362340742])
assert_allclose(signal.gaussian(7, 3),
[0.6065306597126334, 0.8007374029168081,
0.9459594689067654, 1.0, 0.9459594689067654,
0.8007374029168081, 0.6065306597126334])
assert_allclose(signal.gaussian(6, 3, False),
[0.6065306597126334, 0.8007374029168081,
0.9459594689067654, 1.0, 0.9459594689067654,
0.8007374029168081])
class TestHamming(object):
def test_basic(self):
assert_allclose(signal.hamming(6, False),
[0.08, 0.31, 0.77, 1.0, 0.77, 0.31])
assert_allclose(signal.hamming(7, sym=False),
[0.08, 0.2531946911449826, 0.6423596296199047,
0.9544456792351128, 0.9544456792351128,
0.6423596296199047, 0.2531946911449826])
assert_allclose(signal.hamming(6),
[0.08, 0.3978521825875242, 0.9121478174124757,
0.9121478174124757, 0.3978521825875242, 0.08])
assert_allclose(signal.hamming(7, sym=True),
[0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])
class TestHann(object):
def test_basic(self):
assert_allclose(signal.hann(6, sym=False),
[0, 0.25, 0.75, 1.0, 0.75, 0.25])
assert_allclose(signal.hann(7, sym=False),
[0, 0.1882550990706332, 0.6112604669781572,
0.9504844339512095, 0.9504844339512095,
0.6112604669781572, 0.1882550990706332])
assert_allclose(signal.hann(6, True),
[0, 0.3454915028125263, 0.9045084971874737,
0.9045084971874737, 0.3454915028125263, 0])
assert_allclose(signal.hann(7),
[0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])
class TestKaiser(object):
def test_basic(self):
assert_allclose(signal.kaiser(6, 0.5),
[0.9403061933191572, 0.9782962393705389,
0.9975765035372042, 0.9975765035372042,
0.9782962393705389, 0.9403061933191572])
assert_allclose(signal.kaiser(7, 0.5),
[0.9403061933191572, 0.9732402256999829,
0.9932754654413773, 1.0, 0.9932754654413773,
0.9732402256999829, 0.9403061933191572])
assert_allclose(signal.kaiser(6, 2.7),
[0.2603047507678832, 0.6648106293528054,
0.9582099802511439, 0.9582099802511439,
0.6648106293528054, 0.2603047507678832])
assert_allclose(signal.kaiser(7, 2.7),
[0.2603047507678832, 0.5985765418119844,
0.8868495172060835, 1.0, 0.8868495172060835,
0.5985765418119844, 0.2603047507678832])
assert_allclose(signal.kaiser(6, 2.7, False),
[0.2603047507678832, 0.5985765418119844,
0.8868495172060835, 1.0, 0.8868495172060835,
0.5985765418119844])
class TestNuttall(object):
def test_basic(self):
assert_allclose(signal.nuttall(6, sym=False),
[0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345])
assert_allclose(signal.nuttall(7, sym=False),
[0.0003628, 0.03777576895352025, 0.3427276199688195,
0.8918518610776603, 0.8918518610776603,
0.3427276199688196, 0.0377757689535203])
assert_allclose(signal.nuttall(6),
[0.0003628, 0.1105152530498718, 0.7982580969501282,
0.7982580969501283, 0.1105152530498719, 0.0003628])
assert_allclose(signal.nuttall(7, True),
[0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345, 0.0003628])
class TestParzen(object):
def test_basic(self):
assert_allclose(signal.parzen(6),
[0.009259259259259254, 0.25, 0.8611111111111112,
0.8611111111111112, 0.25, 0.009259259259259254])
assert_allclose(signal.parzen(7, sym=True),
[0.00583090379008747, 0.1574344023323616,
0.6501457725947521, 1.0, 0.6501457725947521,
0.1574344023323616, 0.00583090379008747])
assert_allclose(signal.parzen(6, False),
[0.00583090379008747, 0.1574344023323616,
0.6501457725947521, 1.0, 0.6501457725947521,
0.1574344023323616])
class TestTriang(object):
def test_basic(self):
assert_allclose(signal.triang(6, True),
[1/6, 1/2, 5/6, 5/6, 1/2, 1/6])
assert_allclose(signal.triang(7),
[1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4])
assert_allclose(signal.triang(6, sym=False),
[1/4, 1/2, 3/4, 1, 3/4, 1/2])
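# Keys are argument tuples for signal.tukey, either (M, alpha, sym) or shorter
# tuples that rely on the default alpha/sym; values are the expected windows.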
tukey_data = {
(4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
(4, 0.9, True): array([0.0, 0.84312081893436686,
0.84312081893436686, 0.0]),
(4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
(4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
(4, 0.9, False): array([0.0, 0.58682408883346526,
1.0, 0.58682408883346526]),
(4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
(5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
(5, 0.8, True): array([0.0, 0.69134171618254492,
1.0, 0.69134171618254492, 0.0]),
(5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
(6, 0): [1, 1, 1, 1, 1, 1],
(7, 0): [1, 1, 1, 1, 1, 1, 1],
(6, .25): [0, 1, 1, 1, 1, 0],
(7, .25): [0, 1, 1, 1, 1, 1, 0],
(6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0],
(7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0],
(6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0],
(7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0,
0.9698463103929542, 0.4131759111665347, 0],
(6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737,
0.3454915028125263, 0],
(7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],
}
class TestTukey(object):
def test_basic(self):
# Test against hardcoded data
for k, v in tukey_data.items():
if v is None:
assert_raises(ValueError, signal.tukey, *k)
else:
win = signal.tukey(*k)
assert_allclose(win, v, rtol=1e-14)
def test_extremes(self):
# Test extremes of alpha correspond to boxcar and hann
tuk0 = signal.tukey(100, 0)
box0 = signal.boxcar(100)
assert_array_almost_equal(tuk0, box0)
tuk1 = signal.tukey(100, 1)
han1 = signal.hann(100)
assert_array_almost_equal(tuk1, han1)
class TestGetWindow(object):
def test_boxcar(self):
w = signal.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
# window is a tuple of len 1
w = signal.get_window(('boxcar',), 16)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
w = signal.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
w = signal.get_window(('chebwin', 40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_kaiser_float(self):
win1 = signal.get_window(7.2, 64)
win2 = signal.kaiser(64, 7.2, False)
assert_allclose(win1, win2)
def test_invalid_inputs(self):
# Window is not a float, tuple, or string
assert_raises(ValueError, signal.get_window, set('hann'), 8)
# Unknown window type error
assert_raises(ValueError, signal.get_window, 'broken', 4)
def test_array_as_window(self):
# github issue 3603
osfactor = 128
sig = np.arange(128)
win = signal.get_window(('kaiser', 8.0), osfactor // 2)
assert_raises(ValueError, signal.resample,
(sig, len(sig) * osfactor), {'window': win})
def test_windowfunc_basics():
for window_name, params in window_funcs:
window = getattr(signal, window_name)
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
# Check symmetry for odd and even lengths
w1 = window(8, *params, sym=True)
w2 = window(7, *params, sym=False)
assert_array_almost_equal(w1[:-1], w2)
w1 = window(9, *params, sym=True)
w2 = window(8, *params, sym=False)
assert_array_almost_equal(w1[:-1], w2)
# Check that functions run and output lengths are correct
assert_equal(len(window(6, *params, sym=True)), 6)
assert_equal(len(window(6, *params, sym=False)), 6)
assert_equal(len(window(7, *params, sym=True)), 7)
assert_equal(len(window(7, *params, sym=False)), 7)
# Check invalid lengths
assert_raises(ValueError, window, 5.5, *params)
assert_raises(ValueError, window, -7, *params)
# Check degenerate cases
assert_array_equal(window(0, *params, sym=True), [])
assert_array_equal(window(0, *params, sym=False), [])
assert_array_equal(window(1, *params, sym=True), [1])
assert_array_equal(window(1, *params, sym=False), [1])
# Check dtype
assert_(window(0, *params, sym=True).dtype == 'float')
assert_(window(0, *params, sym=False).dtype == 'float')
assert_(window(1, *params, sym=True).dtype == 'float')
assert_(window(1, *params, sym=False).dtype == 'float')
assert_(window(6, *params, sym=True).dtype == 'float')
assert_(window(6, *params, sym=False).dtype == 'float')
# Check normalization
assert_array_less(window(10, *params, sym=True), 1.01)
assert_array_less(window(10, *params, sym=False), 1.01)
assert_array_less(window(9, *params, sym=True), 1.01)
assert_array_less(window(9, *params, sym=False), 1.01)
# Check that DFT-even spectrum is purely real for odd and even
assert_allclose(fftpack.fft(window(10, *params, sym=False)).imag,
0, atol=1e-14)
assert_allclose(fftpack.fft(window(11, *params, sym=False)).imag,
0, atol=1e-14)
def test_needs_params():
for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss', 'dpss',
'chebwin', 'cheb', 'exponential', 'poisson', 'tukey',
'tuk']:
assert_raises(ValueError, signal.get_window, winstr, 7)
|
apbard/scipy
|
scipy/signal/tests/test_windows.py
|
Python
|
bsd-3-clause
| 23,399
|
[
"Gaussian"
] |
2dc4acbec662e42d291cb932c4fdb969505c4fc98ee919771946dc381428ee1d
|
#
# MainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'gulp'
tab.settings['Output file name'] = 'na2so42.gout'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'program'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Averaged Permittivity'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'Averaged permittivity'
# Add new scenarios
methods = ['Maxwell-Garnett','Bruggeman']
shapes = ['Needle', 'Ellipsoid','Plate']
hkls = [[0,0,1], [0,0,1], [1,0,0]]
for method in methods:
for shape,hkl in zip(shapes,hkls):
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Particle shape'] = shape
tab.settings['Effective medium method'] = method
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
tab.settings['Legend'] = method + ' ' + shape + ' ' +str(hkl)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 300
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Gulp - Na2(SO4)2'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 800
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
|
JohnKendrick/PDielec
|
Examples/Gulp/Na2SO42/script.py
|
Python
|
mit
| 2,000
|
[
"GULP"
] |
4571783e2a0021e349fe8ca8d58287b9e83c2fc2c40dbeadfb0bf3395699c797
|
#
# Copyright (C) 2017-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.cuda_init
import espressomd.electrostatics
import tests_common
@utx.skipIfMissingFeatures(["ELECTROSTATICS"])
class CoulombCloudWallTune(ut.TestCase):
"""This compares p3m, p3m_gpu electrostatic forces against stored data."""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
tolerance = 1E-3
def setUp(self):
self.system.box_l = (10, 10, 10)
self.system.time_step = 0.01
self.system.cell_system.skin = 0.4
# Clear actors that might be left from prev tests
self.system.actors.clear()
self.system.part.clear()
data = np.load(tests_common.abspath("data/coulomb_tuning_system.npz"))
self.forces = []
        # Add particles to the system and store the reference forces in a list
# Input format: id pos q f
for id in range(len(data['pos'])):
pos = data['pos'][id]
q = data['charges'][id]
self.forces.append(data['forces'][id])
self.system.part.add(id=id, pos=pos, q=q)
def compare(self, method_name):
# Compare forces now in the system to stored ones
force_abs_diff = 0.
for p in self.system.part:
force_abs_diff += np.linalg.norm(p.f - self.forces[p.id])
force_abs_diff /= len(self.system.part)
self.assertLessEqual(
force_abs_diff,
self.tolerance,
"Absolute force difference " +
str(force_abs_diff) +
" too large for method " +
method_name)
# Tests for individual methods
@utx.skipIfMissingFeatures(["P3M"])
def test_p3m(self):
# We have to add some tolerance here, because the reference
# system is not homogeneous
self.system.actors.add(
espressomd.electrostatics.P3M(prefactor=1., accuracy=5e-4,
tune=True))
self.system.integrator.run(0)
self.compare("p3m")
@utx.skipIfMissingGPU()
def test_p3m_gpu(self):
# We have to add some tolerance here, because the reference
# system is not homogeneous
self.system.actors.add(
espressomd.electrostatics.P3MGPU(prefactor=1., accuracy=5e-4,
tune=True))
self.system.integrator.run(0)
self.compare("p3m_gpu")
if __name__ == "__main__":
ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/coulomb_tuning.py
|
Python
|
gpl-3.0
| 3,268
|
[
"ESPResSo"
] |
6b50e23da602867fc2f9f4d8a86b4bbb19fe5a1cb8308f83a446d1398cc21c2b
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Testmodule for the Reaction Ensemble.
"""
import sys
import os
import unittest as ut
import numpy as np
import espressomd # pylint: disable=import-error
from espressomd import reaction_ensemble
class ReactionEnsembleTest(ut.TestCase):
"""Test the core implementation of the reaction ensemble."""
N0 = 40
c0 = 0.00028
type_HA = 0
type_A = 1
type_H = 5
temperature = 1.0
    # avoid extreme regions of the titration curve, e.g. choose the target
    # alpha not too far from 0.5 to get good statistics in a small number of steps
pKa_minus_pH = -0.2
pH = 2
pKa = pKa_minus_pH + pH
Ka = 10**(-pKa)
box_l = (N0 / c0)**(1.0 / 3.0)
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.seed = system.cell_system.get_state()['n_nodes'] * [2]
np.random.seed(69) #make reaction code fully deterministic
system.cell_system.skin = 0.4
system.time_step = 0.01
RE = reaction_ensemble.ConstantpHEnsemble(
temperature=1.0,
exclusion_radius=1)
@classmethod
def setUpClass(cls):
"""Prepare a testsystem."""
for i in range(0, 2 * cls.N0, 2):
cls.system.part.add(id=i, pos=np.random.random(
3) * cls.system.box_l, type=cls.type_A)
cls.system.part.add(id=i + 1, pos=np.random.random(3) *
cls.system.box_l, type=cls.type_H)
cls.RE.add_reaction(
gamma=cls.Ka, reactant_types=[
cls.type_HA], reactant_coefficients=[1], product_types=[
cls.type_A, cls.type_H], product_coefficients=[
1, 1], default_charges={cls.type_HA: 0, cls.type_A: -1, cls.type_H: +1})
cls.RE.constant_pH = cls.pH
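    # Ideal degree of dissociation from the Henderson-Hasselbalch relation:
    # alpha = 1 / (1 + 10**(pKa - pH))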
@classmethod
def ideal_alpha(cls, pH):
return 1.0 / (1 + 10**(cls.pKa - pH))
def test_ideal_titration_curve(self):
N0 = ReactionEnsembleTest.N0
temperature = ReactionEnsembleTest.temperature
type_A = ReactionEnsembleTest.type_A
type_H = ReactionEnsembleTest.type_H
type_HA = ReactionEnsembleTest.type_HA
box_l = ReactionEnsembleTest.system.box_l
system = ReactionEnsembleTest.system
RE = ReactionEnsembleTest.RE
#chemical warmup - get close to chemical equilibrium before we start sampling
RE.reaction(5*N0)
volume = np.prod(self.system.box_l) # cuboid box
average_NH = 0.0
average_NHA = 0.0
average_NA = 0.0
num_samples = 1000
for i in range(num_samples):
RE.reaction(2)
average_NH += system.number_of_particles( type=type_H)
average_NHA += system.number_of_particles( type=type_HA)
average_NA += system.number_of_particles( type=type_A)
average_NH /= num_samples
average_NA /= num_samples
average_NHA /= num_samples
average_alpha = average_NA / float(N0)
# note you cannot calculate the pH via -log10(<NH>/volume) in the
# constant pH ensemble, since the volume is totally arbitrary and does
# not influence the average number of protons
pH = ReactionEnsembleTest.pH
pKa = ReactionEnsembleTest.pKa
        target_alpha = ReactionEnsembleTest.ideal_alpha(pH)
        rel_error_alpha = abs(
            average_alpha - target_alpha) / target_alpha  # relative error
self.assertLess(
rel_error_alpha,
0.07,
msg="\nDeviation from ideal titration curve is too big for the given input parameters.\n"
+" pH: "+str(pH)
+" pKa: "+str(pKa)
+" average_NH: "+str(average_NH)
+" average_NA: "+str(average_NA)
+" average_NHA:"+str(average_NHA)
+" average alpha: "+str(average_alpha)
+" target_alpha: "+str(target_alpha)
+" rel_error: "+str(rel_error_alpha)
)
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
KonradBreitsprecher/espresso
|
testsuite/constant_pH.py
|
Python
|
gpl-3.0
| 4,744
|
[
"ESPResSo"
] |
9845d5163f5d2a9e97fa08d7f08d2523c62e7e64b2389544c9a456f0b9528376
|
# -*- coding: utf-8 -*-
u"""Types for Genesis parameters
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
#: Dictionary of display names
import enum
from radtrack import rt_enum
@enum.unique
class UndulatorType(rt_enum.Enum):
PLANAR = 0
HELICAL = 1
@enum.unique
class TaperType(rt_enum.Enum):
NONE = 0
LINEAR = 1
QUADRATIC = 2
@enum.unique
class ErrorType(rt_enum.Enum):
GAUSSIAN_MINMIZE = -2
UNIFORM_MINIMIZE = -1
NONE = 0
UNIFORM = 1
GAUSSIAN = 2
@enum.unique
class Coupling(rt_enum.Enum):
AUTO = 0
HELICAL = 1
@enum.unique
class EnergyProfile(rt_enum.Enum):
UNIFORM = 1
GAUSSIAN = 0
@enum.unique
class TransProfile(rt_enum.Enum):
GAUSSIAN = 1
UNIFORM = 2
PARABOLIC = 3
@enum.unique
class GenerateGaus(rt_enum.Enum):
JOINTPROBABILITY = 0
INVERTEDERROR = 1
@enum.unique
class Boundary(rt_enum.Enum):
DIRECHLET = 0
NEUMANN = 1
@enum.unique
class SCCalc(rt_enum.Enum):
ONCE = 0
FOUR = 1
@enum.unique
class CellStart(rt_enum.Enum):
FULL = 0.0
HALF = 0.5
@enum.unique
class ShotnoiseAlgorithm(rt_enum.Enum):
FAWLEY = 0
PENNMAN = 1
@enum.unique
class ScanVar(rt_enum.Enum):
NONE = 0
GAMMA0 = 1
DELGAM = 2
CURPEAK = 3
XLAMDS = 4
AW0 = 5
ISEED = 6
PXBEAM = 7
PYBEAM = 8
XBEAM = 9
YBEAM = 10
RXBEAM = 11
RYBEAM = 12
XLAMD = 13
DELAW = 14
ALPHAX = 15
ALPHAY = 16
EMITX = 17
EMITY = 18
PRAD0 = 19
ZRAYL = 20
ZWAIST = 21
AWD = 22
BEAMFILE = 23
BEAMOPT = 24
BEAMGAM = 25
@enum.unique
class FFspectrum(rt_enum.Enum):
FAR_FIELD = -1
NEAR_FIELD = 0
TOTAL = 1
|
radiasoft/radtrack
|
radtrack/genesis_enums.py
|
Python
|
apache-2.0
| 1,772
|
[
"Gaussian"
] |
2f4476e6ccee968837d07d005c4823c62b108dffd3d726cda734b4d5c2235542
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Tuple
import locale
def GetLocale() -> str:
""" gets the locale / language from the system """
uCurrent:str = 'English'
try:
tCurrent:Tuple = locale.getdefaultlocale()
if "de_" in tCurrent[0]:
uCurrent="German"
except Exception:
pass
return uCurrent
|
thica/ORCA-Remote
|
src/ORCA/utils/Platform/generic/generic_GetLocale.py
|
Python
|
gpl-3.0
| 1,195
|
[
"ORCA"
] |
a4247531f75b8952a2ccd532dd8caf5dc386bb616e901cb85691cc5dbfa1da36
|
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are
noted.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will emit warnings when an error
occurs. By default this is disabled. To enable such messages use
``errprint(1)``, and to disable such messages use ``errprint(0)``.
Example:
>>> print scipy.special.bdtr(-1,10,0.3)
>>> scipy.special.errprint(1)
>>> print scipy.special.bdtr(-1,10,0.3)
.. autosummary::
:toctree: generated/
errprint
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions
ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
itairy --
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- ellipkm1(x) == ellipk(1 - x)
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of real-valued order and complex argument.
jn -- Alias for jv
jve -- Exponentially scaled Bessel function.
yn -- Bessel function of second kind (integer order).
yv -- Bessel function of the second kind (real-valued order).
yve -- Exponentially scaled Bessel function of the second kind.
kn -- Modified Bessel function of the second kind (integer order).
kv -- Modified Bessel function of the second kind (real order).
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function.
ive -- Exponentially scaled modified Bessel function.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
jn_zeros -- [+]Zeros of Jn(x)
jnp_zeros -- [+]Zeros of Jn'(x)
yn_zeros -- [+]Zeros of Yn(x)
ynp_zeros -- [+]Zeros of Yn'(x)
y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of order 0.
j1 -- Bessel function of order 1.
y0 -- Bessel function of second kind of order 0.
y1 -- Bessel function of second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0.
k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
k1 -- Modified Bessel function of the second kind of order 1.
k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
   besselpoly -- Integral of a Bessel function: Jv(2*a*x) * x**lambda from x=0 to 1.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Nth derivative of Jv(v,z)
yvp -- Nth derivative of Yv(v,z)
kvp -- Nth derivative of Kv(v,z)
ivp -- Nth derivative of Iv(v,z)
h1vp -- Nth derivative of H1v(v,z)
h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind, jn(z)
spherical_yn -- Spherical Bessel function of the second kind, yn(z)
spherical_in -- Modified spherical Bessel function of the first kind, in(z)
spherical_kn -- Modified spherical Bessel function of the second kind, kn(z)
These are not universal functions:
.. autosummary::
:toctree: generated/
sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
sph_in -- [+]Sequence of spherical Bessel functions, in(z)
sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
   riccati_jn -- [+]Sequence of Riccati-Bessel functions of first kind.
   riccati_yn -- [+]Sequence of Riccati-Bessel functions of second kind.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function --- Hv(x)
modstruve -- Modified Struve function --- Lv(x)
itstruve0 -- Integral of H0(t) from 0 to x
it2struve0 -- Integral of H0(t)/t from x to Inf.
itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Sum of terms 0 through k of the binomial pdf.
bdtrc -- Sum of terms k+1 through n of the binomial pdf.
bdtri -- Inverse of bdtr
bdtrik --
bdtrin --
btdtr -- Integral from 0 to x of beta pdf.
btdtri -- Quantiles of beta distribution
btdtria --
btdtrib --
fdtr -- Integral from 0 to x of F pdf.
fdtrc -- Integral from x to infinity under F pdf.
fdtri -- Inverse of fdtrc
fdtridfd --
gdtr -- Integral from 0 to x of gamma pdf.
gdtrc -- Integral from x to infinity under gamma pdf.
gdtria -- Inverse with respect to `a` of gdtr.
gdtrib -- Inverse with respect to `b` of gdtr.
gdtrix -- Inverse with respect to `x` of gdtr.
nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
nbdtri -- Inverse of nbdtr
nbdtrik --
nbdtrin --
   ncfdtr -- CDF of noncentral F distribution.
ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.
ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.
ncfdtri -- Inverse CDF of noncentral F distribution.
ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.
nctdtr -- CDF of noncentral t distribution.
nctdtridf -- Find degrees of freedom of noncentral t distribution.
nctdtrit -- Inverse CDF of noncentral t distribution.
nctdtrinc -- Find noncentrality parameter of noncentral t distribution.
nrdtrimn -- Find mean of normal distribution from cdf and std.
nrdtrisd -- Find std of normal distribution from cdf and mean.
pdtr -- Sum of terms 0 through k of the Poisson pdf.
pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
pdtri -- Inverse of pdtr
pdtrik --
stdtr -- Integral from -infinity to t of the Student-t pdf.
stdtridf --
stdtrit --
chdtr -- Integral from 0 to x of the Chi-square pdf.
   chdtrc -- Integral from x to infinity of Chi-square pdf.
chdtri -- Inverse of chdtrc.
chdtriv --
ndtr -- Integral from -infinity to x of standard normal pdf
log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf
ndtri -- Inverse of ndtr (quantiles)
chndtr --
chndtridf --
chndtrinc --
chndtrix --
smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
smirnovi -- Inverse of smirnov.
kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
kolmogi -- Inverse of kolmogorov
tklmbda -- Tukey-Lambda CDF
logit --
expit --
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + x.
inv_boxcox -- Compute the inverse of the Box-Cox tranformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- entr(x) = -x*log(x)
rel_entr -- rel_entr(x, y) = x*log(x/y)
kl_div -- kl_div(x, y) = x*log(x/y) - x + y
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Log of the absolute value of the Gamma function.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Incomplete gamma integral.
gammaincinv -- Inverse of gammainc.
gammaincc -- Complemented incomplete gamma integral.
gammainccinv -- Inverse of gammaincc.
beta -- Beta function.
betaln -- Log of the absolute value of the beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse of betainc.
psi -- Logarithmic derivative of the gamma function.
rgamma -- One divided by the gamma function.
polygamma -- Nth derivative of psi function.
multigammaln -- Log of the multivariate gamma.
digamma -- Digamma function (derivative of the logarithm of gamma).
poch -- The Pochhammer symbol (rising factorial).
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Error function.
erfc -- Complemented error function (1- erf(x))
erfcx -- Scaled complemented error function exp(x**2)*erfc(x)
erfi -- Imaginary error function, -i erf(i x)
erfinv -- Inverse of error function
erfcinv -- Inverse of erfc
wofz -- Fadeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sine and cosine integrals.
fresnel_zeros -- Complex zeros of both Fresnel integrals
modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Complex zeros of erf(z)
fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals
fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.
lpn -- [+]Legendre Functions (polynomials) of the first kind
lqn -- [+]Legendre Functions of the second kind.
lpmn -- [+]Associated Legendre Function of the first kind for real arguments.
lqmn -- [+]Associated Legendre Function of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic E
ellip_harm_2 -- Ellipsoidal harmonic F
ellip_normal -- Ellipsoidal normalization constant
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre
eval_legendre
eval_chebyt
eval_chebyu
eval_chebyc
eval_chebys
eval_jacobi
eval_laguerre
eval_genlaguerre
eval_hermite
eval_hermitenorm
eval_gegenbauer
eval_sh_legendre
eval_sh_chebyt
eval_sh_chebyu
eval_sh_jacobi
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly to :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
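For instance, a minimal sketch using ``legendre`` (listed below):
>>> from scipy import special
>>> p3 = special.legendre(3)
>>> p3.weights.shape   # one row per root: root, weight, total weight
(3, 3)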
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).
chebyt -- [+]Chebyshev polynomial T_n(x)
chebyu -- [+]Chebyshev polynomial U_n(x)
chebyc -- [+]Chebyshev polynomial C_n(x)
chebys -- [+]Chebyshev polynomial S_n(x)
jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
laguerre -- [+]Laguerre polynomial, L_n(x)
genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
hermite -- [+]Hermite polynomial H_n(x)
hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
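A minimal sketch of the recommended route:
>>> from scipy.special import eval_legendre
>>> eval_legendre(3, 0.5)   # P_3(0.5) = (5*0.5**3 - 3*0.5) / 2
-0.4375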
Roots and weights for orthogonal polynomials
.. autosummary::
:toctree: generated/
c_roots
cg_roots
h_roots
he_roots
j_roots
js_roots
l_roots
la_roots
p_roots
ps_roots
s_roots
t_roots
ts_roots
u_roots
us_roots
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function (2F1)
hyp1f1 -- Confluent hypergeometric function (1F1)
hyperu -- Confluent hypergeometric function (U)
hyp0f1 -- Confluent hypergeometric limit function (0F1)
hyp2f0 -- Hypergeometric function (2F0)
hyp1f2 -- Hypergeometric function (1F2)
hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function Dv(x) and derivative.
pbvv -- Parabolic cylinder function Vv(x) and derivative.
pbwa -- Parabolic cylinder function W(a,x) and derivative.
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic values for even solution (ce_m)
mathieu_b -- Characteristic values for odd solution (se_m)
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function
mathieu_sem -- Odd Mathieu function
mathieu_modcem1 -- Even modified Mathieu function of the first kind
mathieu_modcem2 -- Even modified Mathieu function of the second kind
mathieu_modsem1 -- Odd modified Mathieu function of the first kind
mathieu_modsem2 -- Odd modified Mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind
pro_rad1 -- Prolate spheroidal radial function of the first kind
pro_rad2 -- Prolate spheroidal radial function of the second kind
obl_ang1 -- Oblate spheroidal angular function of the first kind
obl_rad1 -- Oblate spheroidal radial function of the first kind
obl_rad2 -- Oblate spheroidal radial function of the second kind
pro_cv -- Compute characteristic value for prolate functions
obl_cv -- Compute characteristic value for oblate functions
pro_cv_seq -- Compute sequence of prolate characteristic values
obl_cv_seq -- Compute sequence of oblate characteristic values
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function of the first kind
pro_rad1_cv -- Prolate spheroidal radial function of the first kind
pro_rad2_cv -- Prolate spheroidal radial function of the second kind
obl_ang1_cv -- Oblate spheroidal angular function of the first kind
obl_rad1_cv -- Oblate spheroidal radial function of the first kind
obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- All Kelvin functions (order 0) and derivatives.
kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
ber -- Kelvin function ber x
bei -- Kelvin function bei x
berp -- Derivative of Kelvin function ber x
beip -- Derivative of Kelvin function bei x
ker -- Kelvin function ker x
kei -- Kelvin function kei x
kerp -- Derivative of Kelvin function ker x
keip -- Derivative of Kelvin function kei x
These are not universal functions:
.. autosummary::
:toctree: generated/
   ber_zeros -- [+]Zeros of Kelvin function ber x
   bei_zeros -- [+]Zeros of Kelvin function bei x
berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
   ker_zeros -- [+]Zeros of Kelvin function ker x
   kei_zeros -- [+]Zeros of Kelvin function kei x
kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]Combinations of N things taken k at a time, "N choose k"
perm -- [+]Permutations of N things taken k at a time, "k-permutations of N"
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic-Geometric Mean
bernoulli -- Bernoulli numbers
binom -- Binomial coefficient.
diric -- Dirichlet function (periodic sinc)
euler -- Euler numbers
expn -- Exponential integral.
exp1 -- Exponential integral of order 1 (for complex argument)
expi -- Another exponential integral -- Ei(x)
factorial -- The factorial function, n! = special.gamma(n+1)
   factorial2 -- Double factorial, n!!
factorialk -- [+](...((n!)!)!...)! where there are k '!'
shichi -- Hyperbolic sine and cosine integrals.
sici -- Integral of the sinc and "cosinc" functions.
spence -- Spence's function, also known as the dilogarithm.
lambertw -- Lambert W function
zeta -- Riemann zeta function of two arguments.
zetac -- Standard Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root.
exp10 -- 10 raised to the x power.
exp2 -- 2 raised to the x power.
radian -- radian angle given degrees, minutes, and seconds.
cosdg -- cosine of the angle given in degrees.
sindg -- sine of the angle given in degrees.
tandg -- tangent of the angle given in degrees.
cotdg -- cotangent of the angle given in degrees.
log1p -- log(1+x)
expm1 -- exp(x)-1
cosm1 -- cos(x)-1
round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
xlogy -- x*log(y)
xlog1py -- x*log1p(y)
exprel -- (exp(x)-1)/x
sinc -- sin(x)/x
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
from ._ufuncs import *
from .basic import *
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
spherical_kn)
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
register_func('i0',i0)
del register_func
from numpy.testing import Tester
test = Tester().test
|
haudren/scipy
|
scipy/special/__init__.py
|
Python
|
bsd-3-clause
| 23,072
|
[
"Gaussian"
] |
6139f0fbdfce7e51db0b3ae0db7a63747bd69ae218111664905f8e29fb987bf2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2016 Sovrasov V. - All Rights Reserved
* You may use, distribute and modify this code under the
* terms of the MIT license.
* You should have received a copy of the MIT license with
* this file. If not visit https://opensource.org/licenses/MIT
'''
import numpy as np
from miscFunctions import *
class TSK0():
def __init__(self):
self.centers = []
self.vars = []
self.b = []
self.numberOfRules = 0
self.inputDimension = 0
def __evaluateConfidence(self, ruleID, x):
expValues = [np.exp(-0.5*((x[i] - self.centers[ruleID][i]) / self.vars[ruleID][i])**2)
for i in xrange(self.inputDimension)]
return np.prod(expValues)
def getParametersBounds(self):
lBound = [0.0]*self.numberOfRules*(1 + 2*self.inputDimension)
uBound = [1.0]*self.numberOfRules*self.inputDimension
uBound.extend([5.0] * ((1 + self.inputDimension) * self.numberOfRules))
return lBound, uBound
def code(self):
parameters = []
for center in self.centers:
parameters.extend(center)
for var in self.vars:
parameters.extend(var)
parameters.extend(self.b)
return parameters
def decode(self, parameters):
for i in xrange(self.numberOfRules):
self.centers[i] = parameters[i*self.inputDimension : (i+1)* \
self.inputDimension]
offset = self.numberOfRules*self.inputDimension
for i in xrange(self.numberOfRules):
self.vars[i] = parameters[offset + i*self.inputDimension : (i+1)* \
self.inputDimension + offset]
self.b = parameters[self.numberOfRules*self.inputDimension*2 :]
def initFromClusters(self, clusterCenters, x, y):
self.centers = clusterCenters
self.numberOfRules = len(clusterCenters)
self.inputDimension = len(x[0])
for i in xrange(self.numberOfRules):
distances = []
for j in xrange(self.numberOfRules):
if j != i:
distances.append(dist(self.centers[i], self.centers[j]))
else:
distances.append(float('inf'))
h = np.argmin(distances)
self.vars.append([distances[h] / 1.5]*self.inputDimension)
for i in xrange(self.numberOfRules):
confidences = [self.__evaluateConfidence(i, vector) for vector in x]
multiplicatedConfidences = np.multiply(confidences, y)
self.b.append(np.sum(multiplicatedConfidences) / np.sum(confidences))
def predictRaw(self, x):
firstLayersOutput = [self.__evaluateConfidence(i, x) for i in xrange(self.numberOfRules)]
sum2 = np.sum(firstLayersOutput)
sum1 = np.sum(np.multiply(firstLayersOutput, self.b))
return sum1 / sum2
def predict(self, x):
return np.ceil(self.predictRaw(x) - 0.5)
def score(self, x, y):
answers = [self.predict(vector) for vector in x]
return np.where(np.array(answers) == np.array(y))[0].size / float(len(y))
|
sovrasov/fuzzy-ml
|
src/tsk0Model.py
|
Python
|
mit
| 3,158
|
[
"VisIt"
] |
344f40733eca3ac6ac71cb4a28e8a6fd6f4b1f5d8822b5e2731eef251acd8e7f
|
#!/usr/bin/python
#
# Copyright 2015 John Kendrick
#
# This file is part of PDielec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the MIT License
# along with this program, if not see https://opensource.org/licenses/MIT
#
"""Read the contents of a directory containg Phonopy input and output files"""
import numpy as np
from PDielec.GenericOutputReader import GenericOutputReader
class PhonopyOutputReader(GenericOutputReader):
"""Read the contents of a directory containg Phonopy input and output files"""
def __init__(self, names, qmreader):
GenericOutputReader.__init__(self, names)
# We have to use the qm reader to do the reading of the QM files
self.type = 'Phonopy output'
self.qmreader = qmreader
return
def _read_output_files(self):
"""Read the Phonopy files in the directory"""
# Set the qm reader to have all the settings that phonopy reader has
self.qmreader.eckart = self.eckart
self.qmreader.debug = self.debug
# trigger the reading of the qm files
self.qmreader.read_output()
# We don't call self._read_outputfile as this starts looking for keywords
# for f in self._outputfiles:
# self._read_output_file(f)
#
# Instead copy anything useful from the QM calcs to self
self.ncells = self.qmreader.ncells
self.unit_cells = self.qmreader.unit_cells
self.volume = self.qmreader.volume
self.spin = self.qmreader.spin
self.energy_cutoff = self.qmreader.energy_cutoff
self.kpoints = self.qmreader.kpoints
self.kpoint_grid = self.qmreader.kpoint_grid
self.nbands = self.qmreader.nbands
self.nions = self.qmreader.nions
self.ions_per_type = self.qmreader.ions_per_type
self.atom_type_list = self.qmreader.atom_type_list
self.electrons = self.qmreader.electrons
self.magnetization = self.qmreader.magnetization
self.final_energy_without_entropy = self.qmreader.final_energy_without_entropy
self.final_free_energy = self.qmreader.final_free_energy
self.pressure = self.qmreader.pressure
self.masses_per_type = self.qmreader.masses_per_type
self.ions_per_type = self.qmreader.ions_per_type
self.masses = self.qmreader.masses
self.nspecies = self.qmreader.nspecies
self.species = self.qmreader.getSpecies()
self.born_charges = self.qmreader.born_charges
self.zerof_optical_dielectric= self.qmreader.zerof_optical_dielectric
self.zerof_static_dielectric = self.qmreader.zerof_static_dielectric
# Calculate dynamical matrix
self.read_dynamical_matrix()
return
def read_dynamical_matrix(self):
#
# Yaml imports of large files are really slow....
# Attempt to use the PyYaml C parser, using yaml.CLoader
#
import yaml
try:
from yaml import CLoader as Loader
        except ImportError:
            print("WARNING: Yaml CLoader is not available, using fallback")
            from yaml import Loader
# the first name has to be the qpoints file
fd = open(self._outputfiles[0])
data_q = yaml.load(fd, Loader=Loader)
        fd.close()
# the second name has to be the phonopy file
fd = open(self._outputfiles[1])
data_p = yaml.load(fd, Loader=Loader)
        fd.close()
self._old_masses = []
for i in range(self.nions):
self._old_masses.append(data_p['primitive_cell']['points'][i]['mass'])
#qpoints = data_q['phonon'][0]['q-position']
# print('q-points',qpoints)
#natom = data_q['natom']
# print('natom:',natom)
dynmat = []
dynmat_data = data_q['phonon'][0]['dynamical_matrix']
for row in dynmat_data:
vals = np.reshape(row, (-1, 2))
dynmat.append(vals[:, 0] + vals[:, 1] * 1j)
dynmat = np.array(dynmat)
# Make sure the hessian is real
hessian = np.real(dynmat)
# We need to convert to cm-1
conversion_factor_to_THz = 15.633302
conversion_factor_to_cm1 = conversion_factor_to_THz * 33.35641
conv = conversion_factor_to_cm1
hessian = hessian * conv * conv
# Find its eigenvalues and eigen vectors
eig_val, eig_vec = np.linalg.eigh(hessian)
self.mass_weighted_normal_modes = []
nmodes = 3*self.nions
# Store the new frequencies, using the negative convention for imaginary modes
frequencies_a = np.sqrt(np.abs(eig_val.real)) * np.sign(eig_val.real)
self.frequencies = frequencies_a.tolist()
# Store the mass weighted normal modes
for i in range(nmodes):
mode = []
n = 0
for j in range(self.nions):
modea = [eig_vec[n][i], eig_vec[n+1][i], eig_vec[n+2][i]]
n = n + 3
mode.append(modea)
self.mass_weighted_normal_modes.append(mode)
# end for i
return
|
JohnKendrick/PDielec
|
PDielec/PhonopyOutputReader.py
|
Python
|
mit
| 5,676
|
[
"phonopy"
] |
d95b534446508007e17459846987b7fbf312a76fedd7fea0fcbf8f16996c0bdb
|
#
# Copyright (C) 2005 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# XXX - TODO:
# add support for DD tags
# add support for HR format tags
import sys
import os
import re
from HTMLParser import HTMLParser
try:
import util
import config
except:
from sabayon import util
from sabayon import config
debug = 0
indent = ' '
bookmark_separator = "/"
TYPE_FOLDER = 1
TYPE_BOOKMARK = 2
TYPE_FOLDER_END = 3
tag_info_dict = {
'dt' : {'implicit_close_event' : ['begin'],
'implicit_close_scope' : ['dl'],
'implicit_close_tags' : ['dt', 'dd']},
'dd' : {'implicit_close_event' : ['begin'],
'implicit_close_scope' : ['dl'],
'implicit_close_tags' : ['dd']},
'dl' : {'implicit_close_event' : ['begin', 'end'],
'implicit_close_scope' : ['dl'],
'implicit_close_tags' : ['dt', 'dd']},
'p' : {'simple_tag' : True},
'hr' : {'simple_tag' : True},
}
# FIXME: these should be defined one place; see mozillasource.py
LOG_OPERATION = 0x00001
LOG_CHANGE = 0x00002
LOG_IGNORED_CHANGE = 0x00004
LOG_APPLY = 0x00008
LOG_SYNC = 0x00010
LOG_PARSE = 0x00020
LOG_PREF = 0x00040
LOG_FILE_CONTENTS = 0x00080
LOG_DATA = 0x00100
LOG_VERBOSE = 0x10000
def dprint(mask, fmt, *args):
# FIXME: before debuglog was introduced, we could use the mask to filter
# which messages to log. Now we don't use it anymore. Is it still useful?
# If you change this, synchronize it with mozillasource.py
debuglog.debug_log (False, debuglog.DEBUG_LOG_DOMAIN_MOZILLA_SOURCE, fmt % args)
class Bookmark:
def __init__(self, folder, name):
self.folder = folder
self.name = name
self.attrs = {}
def get_attr(self, name):
return self.attrs.get(name, None)
def get_url(self):
return self.attrs.get("href", None)
def path(self):
path = self.folder.path()
path.append(self)
return path
def path_as_names(self, join=None):
path = self.folder.path_as_names()
path.append(self.name)
if join == None:
return path
else:
return join.join(path)
def path_as_string(self):
return self.path_as_names(bookmark_separator)
class BookmarkFolder:
def __init__(self, name, parent):
self.reset(name, parent)
def reset(self, name, parent):
self.name = name
self.parent = parent
self.attrs = {}
self.entries = []
def entry_index(self, entry):
n_entries = len(self.entries)
i = 0
while (i < n_entries):
if self.entries[i] == entry:
return i
i += 1
return None
def add_entry(self, entry):
self.entries.append(entry)
return entry
def add_folder(self, folder):
if not isinstance(folder, BookmarkFolder):
folder = BookmarkFolder(folder, self)
self.entries.append(folder)
return folder
def lookup_folder(self, folder):
for entry in self.entries:
if isinstance(entry, BookmarkFolder):
if entry == folder:
return entry
return None
def add_bookmark(self, bookmark):
if not isinstance(bookmark, Bookmark):
bookmark = Bookmark(self, bookmark)
self.entries.append(bookmark)
return bookmark
def lookup_bookmark(self, bookmark):
for entry in self.entries:
if isinstance(entry, Bookmark):
if entry == bookmark:
return entry
return None
def lookup_path(self, path):
path_len = len(path)
i = 0
folder = self
while i < path_len - 1:
folder = folder.lookup_folder(path[i])
if not folder:
return None
i += 1
entry_index = folder.entry_index(path[i])
if entry_index == None:
return None
else:
return folder.entries[entry_index]
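    # Create any missing intermediate folders along `path`, then add the final
    # element to the deepest folder unless it is already present.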
def add_path_entry(self, path, entry):
path_len = len(path)
i = 0
parent = folder = self
while i < path_len - 1:
folder = parent.lookup_folder(path[i])
if not folder:
folder = parent.add_folder(path[i])
parent = folder
i += 1
if folder.entry_index(path[i]) == None:
folder.add_entry(path[i])
def set_attr(self, name, value):
self.attrs[name] = value
def get_attr(self, name):
return self.attrs.get(name, None)
def get_url(self):
return self.attrs.get("href", None)
def path(self):
path = [self]
folder = self
parent = self.parent
while parent:
path.append(parent)
parent = parent.parent
path.reverse()
return path
def path_as_names(self, join=None):
path = self.path()
path = [ p.name for p in path ]
if join == None:
return path
else:
return join.join(path)
def path_as_string(self):
return self.path_as_names(bookmark_separator)
def _traverse(self, visit_func, path, data):
assert isinstance(self, BookmarkFolder)
path.append(self)
for entry in self.entries:
if isinstance(entry, BookmarkFolder):
visit_func(entry, TYPE_FOLDER, path, data)
entry._traverse(visit_func, path, data)
elif isinstance(entry, Bookmark):
visit_func(entry, TYPE_BOOKMARK, path, data)
else:
raise ValueError
path.pop()
visit_func(self, TYPE_FOLDER_END, path, data)
def traverse(self, visit_func, data=None):
path = []
self._traverse(visit_func, path, data)
def find_bookmark(self, name):
result = []
def visit(entry, type, path, data):
if type == TYPE_BOOKMARK:
if entry.name == name:
result.append(entry)
self.traverse(visit)
return result
# ----------------------------------
class HTMLTag:
def __init__(self, tag):
self.tag = tag
self.attrs = {}
self.data = ""
class BookmarkHTMLParser(HTMLParser):
def __init__(self, root=None):
HTMLParser.__init__(self)
self.stack = [HTMLTag("None")]
self.folder_root = root
self.cur_folder = self.folder_root
def set_root(self, root):
self.folder_root = root
def get_root(self):
return self.folder_root
def stack_to_string(self):
return "%s" % [ s.tag for s in self.stack ]
def find_tag_on_stack(self, tag):
i = len(self.stack) - 1
while i >= 0:
if self.stack[i].tag == tag:
return self.stack[i]
i -= 1
return None
def implicit_close(self, event, tag):
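        # Emulate HTML's implicit tag closing: when `tag` triggers the given
        # event ('begin' or 'end'), close any still-open tags listed in its
        # implicit_close_tags, back up to the nearest enclosing scope tag.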
tag_info = tag_info_dict.get(tag, None)
if not tag_info:
return
implicit_close_event = tag_info.get('implicit_close_event', None)
if not implicit_close_event or not event in implicit_close_event:
return
implicit_close_scope = tag_info.get('implicit_close_scope', None)
implicit_close_tags = tag_info.get('implicit_close_tags', None)
if not (implicit_close_scope or implicit_close_tags):
return
scope_index = len(self.stack) - 1
while scope_index >= 0:
if self.stack[scope_index].tag in implicit_close_scope:
break
scope_index = scope_index - 1
i = scope_index + 1
while i < len(self.stack):
if self.stack[i].tag in implicit_close_tags:
break
i = i + 1
j = len(self.stack) - 1
while (j >= i):
self._handle_endtag(self.stack[j].tag)
j = j - 1
def handle_starttag(self, tag, attrs):
self.implicit_close('begin', tag)
tag_info = tag_info_dict.get(tag, None)
if not tag_info:
simple_tag = False
else:
simple_tag = tag_info.get('simple_tag', False)
if not simple_tag:
top = HTMLTag(tag)
for attr, value in attrs:
top.attrs[attr] = value
self.stack.append(top)
def _handle_endtag(self, tag):
        top = self.stack.pop()
if tag == "a":
bookmark = self.cur_folder.add_bookmark(top.data)
for attr, value in top.attrs.items():
bookmark.attrs[attr] = value
if debug:
print "%sBookmark %s" % (indent*(len(self.cur_folder.path())),top.data)
elif top.tag == 'h3' or top.tag == 'h1':
# Folders are contained in a <DT><H3 attrs>name</H3> sequence
# Note, this is currently the only use of the H3 tag in a bookmark
# file so rather than looking for the aforementioned sequence an
# easy "hack" is to just look for an H3 tag, its attrs, and its
# data will be the folder name. Note <H1> is reserved for the
# root folder.
#
# Since this is a new folder, we add it as a folder to the
# currently open folder, it is effectively a push of the folder
# stack, but we maintain it as simply the currently open folder.
if top.tag == 'h3':
if self.cur_folder:
self.cur_folder = self.cur_folder.add_folder(top.data)
else:
self.cur_folder = self.folder_root
else:
# Tag is h1, must be the root folder
self.folder_root.reset(top.data, None)
self.cur_folder = self.folder_root
for attr, value in top.attrs.items():
self.cur_folder.attrs[attr] = value
if debug:
print "%sPUSH Folder %s" % (indent*(len(self.cur_folder.path())-1),self.cur_folder.name)
elif top.tag == 'dl':
# Closing current folder, effectively pop it off the folder stack,
# the currently open folder is replaced by this folders parent.
if debug:
print "%sPOP Folder %s" % (indent*(len(self.cur_folder.path())-1),self.cur_folder.name)
self.cur_folder = self.cur_folder.parent
else:
pass
def handle_endtag(self, tag):
self.implicit_close('end', tag)
# assert tag == self.stack[-1].tag
self._handle_endtag(tag)
def handle_data(self, data):
tag = self.stack[-1]
data = data.strip()
tag.data = tag.data + data
# -----------------------
def visit(entry, type, path, data=None):
max_len = 80
level = len(path)-1
if type == TYPE_FOLDER:
print "%sFolder: %s(%s) path = [%s]" % (indent*level,
entry.name[0:max_len],
data, entry.path_as_string())
elif type == TYPE_BOOKMARK:
print "%sBookmark: %s" % (indent*(level), entry.name[0:max_len])
elif type == TYPE_FOLDER_END:
pass
else:
raise ValueError
for attr, value in entry.attrs.items():
print "%sAttr: %s = %s" % (indent*(level+1), attr, value[0:max_len])
# -----------------------
if __name__ == "__main__":
bm_root = BookmarkFolder('bm', None)
bm_file = BookmarkHTMLParser()
bm_file.set_root(bm_root)
bm_file.feed(open('bookmarks.html').read())
bm_file.close()
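    # Illustrative only: dump the parsed tree with the sample visitor above and
    # look up a bookmark by name (the name below is a made-up example).
    bm_root.traverse(visit)
    matches = bm_root.find_bookmark("Example bookmark")
    print "found %d bookmark(s) named 'Example bookmark'" % len(matches)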
|
GNOME/sabayon
|
lib/mozilla_bookmarks.py
|
Python
|
gpl-2.0
| 12,507
|
[
"VisIt"
] |
c490b81bc13c692abe74fe131909814c6eb4ee6cab8443f91dd04aa118db2006
|
# libxc: svn version 4179
# http://www.tddft.org/programs/octopus/wiki/index.php/Libxc
libxc_functionals = {
'LDA_X': 1,
'LDA_C_WIGNER': 2,
'LDA_C_RPA': 3,
'LDA_C_HL': 4,
'LDA_C_GL': 5,
'LDA_C_XALPHA': 6,
'LDA_C_VWN': 7,
'LDA_C_VWN_RPA': 8,
'LDA_C_PZ': 9,
'LDA_C_PZ_MOD': 10,
'LDA_C_OB_PZ': 11,
'LDA_C_PW': 12,
'LDA_C_PW_MOD': 13,
'LDA_C_OB_PW': 14,
'LDA_C_AMGB': 15,
'LDA_XC_TETER93': 20,
'GGA_X_PBE': 101,
'GGA_X_PBE_R': 102,
'GGA_X_B86': 103,
'GGA_X_B86_R': 104,
'GGA_X_B86_MGC': 105,
'GGA_X_B88': 106,
'GGA_X_G96': 107,
'GGA_X_PW86': 108,
'GGA_X_PW91': 109,
'GGA_X_OPTX': 110,
'GGA_X_DK87_R1': 111,
'GGA_X_DK87_R2': 112,
'GGA_X_LG93': 113,
'GGA_X_FT97_A': 114,
'GGA_X_FT97_B': 115,
'GGA_X_PBE_SOL': 116,
'GGA_X_RPBE': 117,
'GGA_X_WC': 118,
'GGA_X_mPW91': 119,
'GGA_X_AM05': 120,
'GGA_X_PBEA': 121,
'GGA_X_MPBE': 122,
'GGA_X_XPBE': 123,
'GGA_X_OPTPBE': 124,
'GGA_X_OPTB88': 125,
'GGA_X_C09': 126,
'GGA_C_PBE': 130,
'GGA_C_LYP': 131,
'GGA_C_P86': 132,
'GGA_C_PBE_SOL': 133,
'GGA_C_PW91': 134,
'GGA_C_AM05': 135,
'GGA_C_XPBE': 136,
'GGA_C_PBE_REVTPSS': 137,
'GGA_XC_LB': 160,
'GGA_XC_HCTH_93': 161,
'GGA_XC_HCTH_120': 162,
'GGA_XC_HCTH_147': 163,
'GGA_XC_HCTH_407': 164,
'GGA_XC_EDF1': 165,
'GGA_XC_XLYP': 166,
'HYB_GGA_XC_B3PW91': 401,
'HYB_GGA_XC_B3LYP': 402,
'HYB_GGA_XC_B3P86': 403,
'HYB_GGA_XC_O3LYP': 404,
'HYB_GGA_XC_PBEH': 406,
'HYB_GGA_XC_X3LYP': 411,
'HYB_GGA_XC_B1WC': 412,
'MGGA_X_TPSS': 201,
'MGGA_C_TPSS': 202,
'MGGA_X_M06L': 203,
'MGGA_C_M06L': 204,
'MGGA_X_REVTPSS': 205,
'MGGA_C_REVTPSS': 206
}
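# Illustrative usage sketch (not part of the module): look up the libxc id of a
# functional by name, or build the reverse id-to-name mapping.
if __name__ == '__main__':
    print(libxc_functionals['GGA_X_PBE'])  # -> 101
    names_by_id = dict((num, name) for name, num in libxc_functionals.items())
    print(names_by_id[101])  # -> 'GGA_X_PBE'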
|
ajylee/gpaw-rtxs
|
gpaw/xc/libxc_functionals.py
|
Python
|
gpl-3.0
| 1,797
|
[
"Octopus"
] |
4f0dddfc957fc8bc0a2ced6e00e79005a50eb13255661dfbeacd9782ff552a0a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
    EventPages - the event index page and the individual event pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from operator import itemgetter
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (Date, Event)
from gramps.gen.plug.report import Bibliography
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (get_first_letters, _ALPHAEVENT,
_EVENTMAP, alphabet_navigation,
FULLCLEAR, sort_event_types,
primary_difference,
get_index_letter)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# creates the Event List Page and EventPages
#
#################################################
class EventPages(BasePage):
"""
    This class is responsible for displaying information about the 'Event'
    database objects. It displays this information under the 'Events'
    tab. It is told by the 'add_instances' call which 'Event's to display,
    and remembers the list of events. A single call to 'display_pages'
displays both the Event List (Index) page and all the Event
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.event_handle_list = []
self.event_types = []
self.event_dict = defaultdict(set)
def display_pages(self, title):
"""
Generate and output the pages under the Event tab, namely the event
index and the individual event pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Event]")
for item in self.report.obj_dict[Event].items():
LOG.debug(" %s", str(item))
event_handle_list = self.report.obj_dict[Event].keys()
event_types = []
for event_handle in event_handle_list:
event = self.r_db.get_event_from_handle(event_handle)
event_types.append(self._(event.get_type().xml_str()))
message = _("Creating event pages")
with self.r_user.progress(_("Narrated Web Site Report"), message,
len(event_handle_list) + 1
) as step:
index = 1
for event_handle in event_handle_list:
step()
index += 1
self.eventpage(self.report, title, event_handle)
step()
self.eventlistpage(self.report, title, event_types,
event_handle_list)
def eventlistpage(self, report, title, event_types, event_handle_list):
"""
Will create the event list page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: event_types -- A list of the type in the events database
@param: event_handle_list -- A list of event handles
"""
BasePage.__init__(self, report, title)
ldatec = 0
prev_letter = " "
output_file, sio = self.report.create_file("events")
result = self.write_header(self._("Events"))
eventslistpage, dummy_head, dummy_body, outerwrapper = result
# begin events list division
with Html("div", class_="content", id="EventList") as eventlist:
outerwrapper += eventlist
msg = self._("This page contains an index of all the events in the "
"database, sorted by their type and date (if one is "
"present). Clicking on an event’s Gramps ID "
"will open a page for that event.")
eventlist += Html("p", msg, id="description")
# get alphabet navigation...
index_list = get_first_letters(self.r_db, event_types,
_ALPHAEVENT)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav:
eventlist += alpha_nav
# begin alphabet event table
with Html("table",
class_="infolist primobjlist alphaevent") as table:
eventlist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [(self._("Letter"),
"ColumnRowLabel"),
(self._("Type"), "ColumnType"),
(self._("Date"), "ColumnDate"),
(self._("Gramps ID"),
"ColumnGRAMPSID"),
(self._("Person"), "ColumnPerson")
]
)
tbody = Html("tbody")
table += tbody
                # separate events by their type and then their event handles
for (evt_type,
data_list) in sort_event_types(self.r_db,
event_types,
event_handle_list,
self.rlocale):
first = True
_event_displayed = []
# sort datalist by date of event and by event handle...
data_list = sorted(data_list, key=itemgetter(0, 1))
first_event = True
for (dummy_sort_value, event_handle) in data_list:
event = self.r_db.get_event_from_handle(event_handle)
_type = event.get_type()
gid = event.get_gramps_id()
if event.get_change_time() > ldatec:
ldatec = event.get_change_time()
# check to see if we have listed this gramps_id yet?
if gid not in _event_displayed:
# family event
if int(_type) in _EVENTMAP:
handle_list = set(
self.r_db.find_backlink_handles(
event_handle,
include_classes=['Family', 'Person']))
else:
handle_list = set(
self.r_db.find_backlink_handles(
event_handle,
include_classes=['Person']))
if handle_list:
trow = Html("tr")
tbody += trow
# set up hyperlinked letter for
# alphabet_navigation
tcell = Html("td", class_="ColumnLetter",
inline=True)
trow += tcell
if evt_type and not evt_type.isspace():
letter = get_index_letter(
self._(str(evt_type)[0].capitalize()),
index_list, self.rlocale)
else:
letter = " "
if first or primary_difference(letter,
prev_letter,
self.rlocale):
first = False
prev_letter = letter
t_a = 'class = "BeginLetter BeginType"'
trow.attr = t_a
ttle = self._("Event types beginning "
"with letter %s") % letter
tcell += Html("a", letter, name=letter,
id_=letter, title=ttle,
inline=True)
else:
tcell += " "
# display Event type if first in the list
tcell = Html("td", class_="ColumnType",
title=self._(evt_type),
inline=True)
trow += tcell
if first_event:
tcell += self._(evt_type)
if trow.attr == "":
trow.attr = 'class = "BeginType"'
else:
tcell += " "
# event date
tcell = Html("td", class_="ColumnDate",
inline=True)
trow += tcell
date = Date.EMPTY
if event:
date = event.get_date_object()
if date and date is not Date.EMPTY:
tcell += self.rlocale.get_date(date)
else:
tcell += " "
# Gramps ID
trow += Html("td", class_="ColumnGRAMPSID") + (
self.event_grampsid_link(event_handle,
gid, None)
)
# Person(s) column
tcell = Html("td", class_="ColumnPerson")
trow += tcell
# classname can either be a person or a family
first_person = True
# get person(s) for ColumnPerson
sorted_list = sorted(handle_list)
self.complete_people(tcell, first_person,
sorted_list,
uplink=False)
_event_displayed.append(gid)
first_event = False
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
outerwrapper += (FULLCLEAR, footer)
        # send page out for processing
# and close the file
self.xhtml_writer(eventslistpage, output_file, sio, ldatec)
def _geteventdate(self, event_handle):
"""
Get the event date
@param: event_handle -- The handle for the event to use
"""
event_date = Date.EMPTY
event = self.r_db.get_event_from_handle(event_handle)
if event:
date = event.get_date_object()
if date:
# returns the date in YYYY-MM-DD format
return Date(date.get_year_calendar("Gregorian"),
date.get_month(), date.get_day())
# return empty date string
return event_date
def event_grampsid_link(self, handle, grampsid, uplink):
"""
Create a hyperlink from event handle, but show grampsid
@param: handle -- The handle for the event
@param: grampsid -- The gramps ID to display
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
url = self.report.build_url_fname_html(handle, "evt", uplink)
# return hyperlink to its caller
return Html("a", grampsid, href=url, title=grampsid, inline=True)
def eventpage(self, report, title, event_handle):
"""
Creates the individual event page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: event_handle -- The event handle for the database
"""
event = report.database.get_event_from_handle(event_handle)
BasePage.__init__(self, report, title, event.get_gramps_id())
if not event:
return
ldatec = event.get_change_time()
event_media_list = event.get_media_list()
self.uplink = True
subdirs = True
evt_type = self._(event.get_type().xml_str())
self.page_title = "%(eventtype)s" % {'eventtype' : evt_type}
self.bibli = Bibliography()
output_file, sio = self.report.create_file(event_handle, "evt")
result = self.write_header(self._("Events"))
eventpage, dummy_head, dummy_body, outerwrapper = result
# start event detail division
with Html("div", class_="content", id="EventDetail") as eventdetail:
outerwrapper += eventdetail
thumbnail = self.disp_first_img_as_thumbnail(event_media_list,
event)
if thumbnail is not None:
eventdetail += thumbnail
# display page title
eventdetail += Html("h3", self.page_title, inline=True)
# begin eventdetail table
with Html("table", class_="infolist eventlist") as table:
eventdetail += table
tbody = Html("tbody")
table += tbody
evt_gid = event.get_gramps_id()
if not self.noid and evt_gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute", inline=True),
Html("td", evt_gid,
class_="ColumnGRAMPSID", inline=True)
)
tbody += trow
# get event data
#
# for more information: see get_event_data()
#
event_data = self.get_event_data(event, event_handle,
subdirs, evt_gid)
for (label, colclass, data) in event_data:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html('td', data, class_="Column" + colclass)
)
tbody += trow
# Narrative subsection
notelist = event.get_note_list()
notelist = self.display_note_list(notelist, Event)
if notelist is not None:
eventdetail += notelist
# get attribute list
attrlist = event.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
eventdetail += attrsection
# event source references
srcrefs = self.display_ind_sources(event)
if srcrefs is not None:
eventdetail += srcrefs
# display additional images as gallery
if self.create_media:
addgallery = self.disp_add_img_as_gallery(event_media_list,
event)
if addgallery:
eventdetail += addgallery
# References list
ref_list = self.display_bkref_list(Event, event_handle)
if ref_list is not None:
eventdetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the page
self.xhtml_writer(eventpage, output_file, sio, ldatec)
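# Illustrative only (hypothetical driver, not part of this module): the report
# engine is expected to build the page collection and ask it to write both the
# index page and the individual event pages, roughly as
#
#     event_pages = EventPages(report)  # 'report' is the narrative web report
#     event_pages.display_pages(title)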
|
sam-m888/gramps
|
gramps/plugins/webreport/event.py
|
Python
|
gpl-2.0
| 19,096
|
[
"Brian"
] |
280caa0da464c487f6a38534cb021c9472a0d3f5324e5b6397b0268ef87303d1
|
import numpy as np
from sympy import Rational as frac
from sympy import sqrt
from ..helpers import article
from ..un._mysovskikh import get_nsimplex_points
from ._helpers import Enr2Scheme
from ._phillips import phillips as lu_darmofal_3
from ._stroud import stroud_enr2_5_1a as lu_darmofal_4a
from ._stroud import stroud_enr2_5_1b as lu_darmofal_4b
from ._stroud_secrest import stroud_secrest_4 as lu_darmofal_2
source = article(
authors=["James Lu", "David L. Darmofal"],
title="Higher-Dimensional Integration with Gaussian Weight for Applications in Probabilistic Design",
journal="SIAM J. Sci. Comput.",
volume="26",
number="2",
year="2004",
pages="613–624",
url="https://doi.org/10.1137/S1064827503426863",
)
def lu_darmofal_1(n):
# ENH The article says n>=4, but the scheme also works for 2, 3
assert n >= 2
a = get_nsimplex_points(n, sqrt, frac)
b = np.array(
[
sqrt(frac(n, 2 * (n - 1))) * (a[k] + a[l])
for k in range(len(a))
for l in range(k)
]
)
points = np.concatenate(
[
[[0] * n],
+sqrt(frac(n, 2) + 1) * a,
-sqrt(frac(n, 2) + 1) * a,
+sqrt(frac(n, 2) + 1) * b,
-sqrt(frac(n, 2) + 1) * b,
]
)
points = np.ascontiguousarray(points.T)
p = frac(2, n + 2)
A = frac(n ** 2 * (7 - n), 2 * (n + 1) ** 2 * (n + 2) ** 2)
B = frac(2 * (n - 1) ** 2, (n + 1) ** 2 * (n + 2) ** 2)
weights = np.concatenate(
[
[p],
np.full(len(a), A),
np.full(len(a), A),
np.full(len(b), B),
np.full(len(b), B),
]
)
return Enr2Scheme("Lu-Darmofal I", n, weights, points, 5, source)
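# Quick sanity check (illustrative only, not part of quadpy): assuming
# get_nsimplex_points returns the n + 1 vertices of a regular n-simplex, the
# weights built above sum to exactly 1 for every n >= 2, because
# 2*(n+1)*(n+2) + n^2*(7-n) + 2*n*(n-1)^2 == (n+1)*(n+2)^2.
def _lu_darmofal_1_weight_sum(n):
    num_a = n + 1              # simplex vertices
    num_b = n * (n + 1) // 2   # pairs (k, l) with l < k
    p = frac(2, n + 2)
    a_w = frac(n ** 2 * (7 - n), 2 * (n + 1) ** 2 * (n + 2) ** 2)
    b_w = frac(2 * (n - 1) ** 2, (n + 1) ** 2 * (n + 2) ** 2)
    return p + 2 * num_a * a_w + 2 * num_b * b_w
if __name__ == "__main__":
    assert all(_lu_darmofal_1_weight_sum(dim) == 1 for dim in range(2, 10))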
__all__ = [
"lu_darmofal_1",
"lu_darmofal_2",
"lu_darmofal_3",
"lu_darmofal_4a",
"lu_darmofal_4b",
]
|
nschloe/quadpy
|
src/quadpy/enr2/_lu_darmofal.py
|
Python
|
mit
| 1,887
|
[
"Gaussian"
] |
b7baf4e6348a60cd9267f21db6242167b9d618e38f83ce007a8623c1283a4c50
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Tableview"""
import logging
logger = logging.getLogger( 'camelot.view.controls.tableview' )
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QSizePolicy
from camelot.view.proxy.queryproxy import QueryTableProxy
from camelot.view.controls.view import AbstractView
from camelot.view.controls.user_translatable_label import UserTranslatableLabel
from camelot.view.model_thread import post
from camelot.view.model_thread import gui_function
from camelot.view.model_thread import model_function
from camelot.view import register
from camelot.core.utils import ugettext as _
from search import SimpleSearchControl
class FrozenTableWidget( QtGui.QTableView ):
"""A table widget to be used as the frozen table widget inside a table
widget."""
def __init__(self, parent, columns_frozen):
super(FrozenTableWidget, self).__init__(parent)
self.setSelectionBehavior( QtGui.QAbstractItemView.SelectRows )
self.setEditTriggers( QtGui.QAbstractItemView.SelectedClicked |
QtGui.QAbstractItemView.DoubleClicked )
self._columns_frozen = columns_frozen
@QtCore.pyqtSlot(QtCore.QModelIndex, QtCore.QModelIndex)
def currentChanged(self, current, previous):
"""When the current index has changed, prevent it to jump to
a column that is not frozen"""
if current.column() >= self._columns_frozen:
current = self.model().index( current.row(), -1 )
if previous.column() >= self._columns_frozen:
previous = self.model().index( previous.row(), -1 )
super(FrozenTableWidget, self).currentChanged(current, previous)
class TableWidget( QtGui.QTableView ):
"""A widget displaying a table, to be used within a TableView
.. attribute:: margin
margin, specified as a number of pixels, used to calculate the height of a row
in the table, the minimum row height will allow for this number of pixels below
and above the text.
"""
margin = 5
def __init__( self, parent = None, columns_frozen = 0, lines_per_row = 1 ):
"""
:param columns_frozen: the number of columns on the left that don't scroll
:param lines_per_row: the number of lines of text that should be viewable in a single row.
"""
QtGui.QTableView.__init__( self, parent )
logger.debug( 'create TableWidget' )
self._columns_frozen = columns_frozen
self.setSelectionBehavior( QtGui.QAbstractItemView.SelectRows )
self.setEditTriggers( QtGui.QAbstractItemView.SelectedClicked |
QtGui.QAbstractItemView.DoubleClicked |
QtGui.QAbstractItemView.CurrentChanged )
self.setSizePolicy( QSizePolicy.Expanding, QSizePolicy.Expanding )
self.horizontalHeader().setClickable( True )
self._header_font_required = QtGui.QApplication.font()
self._header_font_required.setBold( True )
line_height = QtGui.QFontMetrics(QtGui.QApplication.font()).lineSpacing()
self._minimal_row_height = line_height * lines_per_row + 2*self.margin
self.verticalHeader().setDefaultSectionSize( self._minimal_row_height )
self.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.horizontalHeader().sectionClicked.connect(
self.horizontal_section_clicked )
if columns_frozen:
frozen_table_view = FrozenTableWidget(self, columns_frozen)
frozen_table_view.setObjectName( 'frozen_table_view' )
frozen_table_view.verticalHeader().setDefaultSectionSize( self._minimal_row_height )
frozen_table_view.verticalHeader().hide()
frozen_table_view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Fixed)
frozen_table_view.horizontalHeader().sectionClicked.connect(
self.horizontal_section_clicked )
self.horizontalHeader().sectionResized.connect( self._update_section_width )
self.verticalHeader().sectionResized.connect( self._update_section_height )
frozen_table_view.verticalScrollBar().valueChanged.connect( self.verticalScrollBar().setValue )
self.verticalScrollBar().valueChanged.connect( frozen_table_view.verticalScrollBar().setValue )
self.viewport().stackUnder(frozen_table_view)
frozen_table_view.setStyleSheet("QTableView { border: none;}")
frozen_table_view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
frozen_table_view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
frozen_table_view.show()
frozen_table_view.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
@QtCore.pyqtSlot(int, int, int)
def _update_section_width(self, logical_index, _int, new_size):
frozen_table_view = self.findChild(QtGui.QWidget, 'frozen_table_view' )
if logical_index<self._columns_frozen and frozen_table_view:
frozen_table_view.setColumnWidth( logical_index, new_size)
self._update_frozen_table()
@QtCore.pyqtSlot(int, int, int)
def _update_section_height(self, logical_index, _int, new_size):
frozen_table_view = self.findChild(QtGui.QWidget, 'frozen_table_view' )
if frozen_table_view:
frozen_table_view.setRowHeight(logical_index, new_size)
def setItemDelegate(self, item_delegate):
super(TableWidget, self).setItemDelegate(item_delegate)
frozen_table_view = self.findChild(QtGui.QWidget, 'frozen_table_view' )
if frozen_table_view:
frozen_table_view.setItemDelegate(item_delegate)
def resizeEvent(self, event):
super(TableWidget, self).resizeEvent(event)
self._update_frozen_table()
def moveCursor(self, cursorAction, modifiers):
current = super(TableWidget, self).moveCursor(cursorAction, modifiers)
frozen_table_view = self.findChild(QtGui.QWidget, 'frozen_table_view' )
if frozen_table_view:
frozen_width = 0
last_frozen = min(self._columns_frozen, self.model().columnCount())
for column in range(0, last_frozen):
frozen_width += self.columnWidth(column)
if cursorAction == QtGui.QAbstractItemView.MoveLeft and current.column() >= last_frozen and \
self.visualRect(current).topLeft().x() < frozen_width:
new_value = self.horizontalScrollBar().value() + self.visualRect(current).topLeft().x() - frozen_width
self.horizontalScrollBar().setValue(new_value)
return current
def scrollTo(self, index, hint):
if(index.column()>=self._columns_frozen):
super(TableWidget, self).scrollTo(index, hint)
def edit(self, index, trigger=None, event=None):
#
# columns in the frozen part should never be edited, because this might result
# in an editor opening below the frozen column that contains the old value
# which will be committed again when closed
#
if index.column() >= self._columns_frozen:
if trigger==None and event==None:
return super( TableWidget, self ).edit( index )
return super( TableWidget, self ).edit( index, trigger, event )
return False
@QtCore.pyqtSlot()
def _update_frozen_table(self):
frozen_table_view = self.findChild(QtGui.QWidget, 'frozen_table_view' )
if frozen_table_view:
frozen_table_view.setSelectionModel(self.selectionModel())
last_frozen = min(self._columns_frozen, self.model().columnCount())
frozen_width = 0
for column in range(0, last_frozen):
frozen_width += self.columnWidth( column )
frozen_table_view.setColumnWidth( column,
self.columnWidth(column) )
for column in range(last_frozen, self.model().columnCount()):
frozen_table_view.setColumnHidden(column, True)
frozen_table_view.setGeometry( self.verticalHeader().width() + self.frameWidth(),
self.frameWidth(),
frozen_width,
self.viewport().height() + self.horizontalHeader().height() )
@QtCore.pyqtSlot( int )
def horizontal_section_clicked( self, logical_index ):
"""Update the sorting of the model and the header"""
header = self.horizontalHeader()
order = Qt.AscendingOrder
if not header.isSortIndicatorShown():
header.setSortIndicatorShown( True )
elif header.sortIndicatorSection()==logical_index:
            # apparently, the sort order on the header is already switched
            # when the section was clicked, so there is no need to reverse it
order = header.sortIndicatorOrder()
header.setSortIndicator( logical_index, order )
self.model().sort( logical_index, order )
def close_editor(self):
"""Close the active editor, this method is used to prevent assertion
failures in QT when an editor is still open in the view for a cell
that no longer exists in the model
thos assertion failures only exist in QT debug builds.
"""
current_index = self.currentIndex()
self.closePersistentEditor( current_index )
def setModel( self, model ):
#
# An editor might be open that is no longer available for the new
        # model. Not closing this editor results in assertion failures
        # in Qt, resulting in segfaults in the debug build.
#
self.close_editor()
#
        # Editor closed. It should be safe to change the model
#
QtGui.QTableView.setModel( self, model )
frozen_table_view = self.findChild(QtGui.QWidget, 'frozen_table_view' )
if frozen_table_view:
model.layoutChanged.connect( self._update_frozen_table )
frozen_table_view.setModel( model )
self._update_frozen_table()
register.register( model, self )
self.selectionModel().currentChanged.connect( self.activated )
@QtCore.pyqtSlot(QtCore.QModelIndex, QtCore.QModelIndex)
def activated( self, selectedIndex, previousSelectedIndex ):
option = QtGui.QStyleOptionViewItem()
new_size = self.itemDelegate( selectedIndex ).sizeHint( option,
selectedIndex )
row = selectedIndex.row()
if previousSelectedIndex.row() >= 0:
previous_row = previousSelectedIndex.row()
self.setRowHeight( previous_row, self._minimal_row_height )
self.setRowHeight( row, max( new_size.height(),
self._minimal_row_height ) )
class RowsWidget( QtGui.QLabel ):
"""Widget that is part of the header widget, displaying the number of rows
in the table view"""
_number_of_rows_font = QtGui.QApplication.font()
def __init__( self, parent ):
QtGui.QLabel.__init__( self, parent )
self.setFont( self._number_of_rows_font )
def setNumberOfRows( self, rows ):
self.setText( _('(%i rows)')%rows )
class HeaderWidget( QtGui.QWidget ):
"""HeaderWidget for a tableview, containing the title, the search widget,
and the number of rows in the table"""
search_widget = SimpleSearchControl
rows_widget = RowsWidget
filters_changed_signal = QtCore.pyqtSignal()
_title_font = QtGui.QApplication.font()
_title_font.setBold( True )
def __init__( self, parent, admin ):
QtGui.QWidget.__init__( self, parent )
self._admin = admin
layout = QtGui.QVBoxLayout()
widget_layout = QtGui.QHBoxLayout()
search = self.search_widget( self )
search.expand_search_options_signal.connect(
self.expand_search_options )
title = UserTranslatableLabel( admin.get_verbose_name_plural(),
self )
title.setFont( self._title_font )
widget_layout.addWidget( title )
widget_layout.addWidget( search )
if self.rows_widget:
self.number_of_rows = self.rows_widget( self )
widget_layout.addWidget( self.number_of_rows )
else:
self.number_of_rows = None
layout.addLayout( widget_layout )
self._expanded_filters_created = False
self._expanded_search = QtGui.QWidget()
self._expanded_search.hide()
layout.addWidget(self._expanded_search)
self.setLayout( layout )
self.setSizePolicy( QSizePolicy.Minimum, QSizePolicy.Fixed )
self.setNumberOfRows( 0 )
self.search = search
def _fill_expanded_search_options(self, columns):
"""Given the columns in the table view, present the user
with more options to filter rows in the table
:param columns: a list of tuples with field names and attributes
"""
from camelot.view.controls.filter_operator import FilterOperator
layout = QtGui.QHBoxLayout()
for field, attributes in columns:
if 'operators' in attributes and attributes['operators']:
widget = FilterOperator( self._admin.entity,
field, attributes,
self )
widget.filter_changed_signal.connect( self._filter_changed )
layout.addWidget( widget )
layout.addStretch()
self._expanded_search.setLayout( layout )
self._expanded_filters_created = True
def _filter_changed(self):
self.filters_changed_signal.emit()
def decorate_query(self, query):
"""Apply expanded filters on the query"""
if self._expanded_filters_created:
for i in range(self._expanded_search.layout().count()):
if self._expanded_search.layout().itemAt(i).widget():
query = self._expanded_search.layout().itemAt(i).widget().decorate_query(query)
return query
@QtCore.pyqtSlot()
def expand_search_options(self):
if self._expanded_search.isHidden():
if not self._expanded_filters_created:
post( self._admin.get_columns, self._fill_expanded_search_options )
self._expanded_search.show()
else:
self._expanded_search.hide()
@gui_function
def setNumberOfRows( self, rows ):
if self.number_of_rows:
self.number_of_rows.setNumberOfRows( rows )
class TableView( AbstractView ):
"""A generic tableview widget that puts together some other widgets. The behaviour of this class and
the resulting interface can be tuned by specifying specific class attributes which define the underlying
widgets used ::
class MovieRentalTableView(TableView):
title_format = 'Grand overview of recent movie rentals'
The attributes that can be specified are :
.. attribute:: header_widget
The widget class to be used as a header in the table view::
header_widget = HeaderWidget
.. attribute:: table_widget
The widget class used to display a table within the table view ::
table_widget = TableWidget
.. attribute:: title_format
A string used to format the title of the view ::
title_format = '%(verbose_name_plural)s'
.. attribute:: table_model
A class implementing QAbstractTableModel that will be used as a model for the table view ::
table_model = QueryTableProxy
- emits the row_selected signal when a row has been selected
"""
header_widget = HeaderWidget
TableWidget = TableWidget
#
# The proxy class to use
#
table_model = QueryTableProxy
#
# Format to use as the window title
#
title_format = '%(verbose_name_plural)s'
row_selected_signal = QtCore.pyqtSignal(int)
def __init__( self, admin, search_text = None, parent = None ):
super(TableView, self).__init__( parent )
self.admin = admin
post( self.get_title, self.change_title )
widget_layout = QtGui.QVBoxLayout()
if self.header_widget:
self.header = self.header_widget( self, admin )
widget_layout.addWidget( self.header )
self.header.search.search_signal.connect( self.startSearch )
self.header.search.cancel_signal.connect( self.cancelSearch )
if search_text:
self.header.search.search( search_text )
else:
self.header = None
widget_layout.setSpacing( 0 )
widget_layout.setMargin( 0 )
splitter = QtGui.QSplitter( self )
splitter.setObjectName('splitter')
widget_layout.addWidget( splitter )
table_widget = QtGui.QWidget( self )
filters_widget = QtGui.QWidget( self )
self.table_layout = QtGui.QVBoxLayout()
self.table_layout.setSpacing( 0 )
self.table_layout.setMargin( 0 )
self.table = None
self.filters_layout = QtGui.QVBoxLayout()
self.filters_layout.setSpacing( 0 )
self.filters_layout.setMargin( 0 )
self.actions = None
self._table_model = None
table_widget.setLayout( self.table_layout )
filters_widget.setLayout( self.filters_layout )
#filters_widget.hide()
self.set_admin( admin )
splitter.addWidget( table_widget )
splitter.addWidget( filters_widget )
self.setLayout( widget_layout )
self.search_filter = lambda q: q
shortcut = QtGui.QShortcut(QtGui.QKeySequence(QtGui.QKeySequence.Find), self)
shortcut.activated.connect( self.activate_search )
if self.header_widget:
self.header.filters_changed_signal.connect( self.rebuild_query )
        # give the table widget focus to prevent the header and its search control from
        # receiving default focus, as this would prevent the displaying of 'Search...' in the
        # search control, but this conflicts with the MDI, resulting in the window not
        # being active and the menus not working properly
#table_widget.setFocus( QtCore.Qt.OtherFocusReason )
#self.setFocusProxy(table_widget)
#self.setFocus( QtCore.Qt.OtherFocusReason )
post( self.admin.get_subclass_tree, self.setSubclassTree )
@QtCore.pyqtSlot()
def activate_search(self):
self.header.search.setFocus(QtCore.Qt.ShortcutFocusReason)
@model_function
def get_title( self ):
return self.title_format % {'verbose_name_plural':self.admin.get_verbose_name_plural()}
@QtCore.pyqtSlot(list)
@gui_function
def setSubclassTree( self, subclasses ):
if len( subclasses ) > 0:
from inheritance import SubclassTree
splitter = self.findChild(QtGui.QWidget, 'splitter' )
class_tree = SubclassTree( self.admin, splitter )
splitter.insertWidget( 0, class_tree )
class_tree.subclass_clicked_signal.connect( self.set_admin )
@QtCore.pyqtSlot(int)
def sectionClicked( self, section ):
"""emits a row_selected signal"""
self.row_selected_signal.emit( section )
def copy_selected_rows( self ):
"""Copy the selected rows in this tableview"""
        logger.debug( 'copy selected rows called' )
if self.table and self._table_model:
for row in set( map( lambda x: x.row(), self.table.selectedIndexes() ) ):
self._table_model.copy_row( row )
def select_all_rows( self ):
self.table.selectAll()
def create_table_model( self, admin ):
"""Create a table model for the given admin interface"""
return self.table_model( admin,
None,
admin.get_columns )
def get_admin(self):
return self.admin
def get_model(self):
return self._table_model
@QtCore.pyqtSlot( object )
@gui_function
def set_admin( self, admin ):
"""Switch to a different subclass, where admin is the admin object of the
subclass"""
logger.debug('set_admin called')
self.admin = admin
if self.table:
self._table_model.layoutChanged.disconnect( self.tableLayoutChanged )
self.table_layout.removeWidget(self.table)
self.table.deleteLater()
self._table_model.deleteLater()
splitter = self.findChild( QtGui.QWidget, 'splitter' )
self.table = self.TableWidget( splitter,
self.admin.list_columns_frozen,
lines_per_row = self.admin.lines_per_row )
self._table_model = self.create_table_model( admin )
self.table.setModel( self._table_model )
self.table.verticalHeader().sectionClicked.connect( self.sectionClicked )
self._table_model.layoutChanged.connect( self.tableLayoutChanged )
self.tableLayoutChanged()
self.table_layout.insertWidget( 1, self.table )
def get_filters_and_actions():
return ( admin.get_filters(), admin.get_list_actions() )
post( get_filters_and_actions, self.set_filters_and_actions )
@QtCore.pyqtSlot()
@gui_function
def tableLayoutChanged( self ):
logger.debug('tableLayoutChanged')
if self.header:
self.header.setNumberOfRows( self._table_model.rowCount() )
item_delegate = self._table_model.getItemDelegate()
if item_delegate:
self.table.setItemDelegate( item_delegate )
for i in range( self._table_model.columnCount() ):
self.table.setColumnWidth( i, self._table_model.headerData( i, Qt.Horizontal, Qt.SizeHintRole ).toSize().width() )
def deleteSelectedRows( self ):
"""delete the selected rows in this tableview"""
logger.debug( 'delete selected rows called' )
confirmation_message = self.admin.get_confirm_delete()
confirmed = True
if confirmation_message:
if QtGui.QMessageBox.question(self,
_('Please confirm'),
unicode(confirmation_message),
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
confirmed = False
if confirmed:
rows = set( index.row() for index in self.table.selectedIndexes() )
self._table_model.remove_rows( set( rows ) )
@gui_function
def newRow( self ):
"""Create a new row in the tableview"""
from camelot.view.workspace import show_top_level
form = self.admin.create_new_view( parent = None,
oncreate = lambda o:self._table_model.insertEntityInstance( 0, o ),
onexpunge = self._table_model.remove_objects )
show_top_level( form, self )
def closeEvent( self, event ):
"""reimplements close event"""
logger.debug( 'tableview closed' )
event.accept()
def selectTableRow( self, row ):
"""selects the specified row"""
self.table.selectRow( row )
def makeImport(self):
pass
# for row in data:
# o = self.admin.entity()
# #For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123
# # if you want to import all attributes, you must link them to other objects
# #for example: a movie has a director, this isn't a primitive like a string
# # but a object fetched from the db
# setattr(o, object_attributes[0], row[0])
# name = row[2].split( ' ' ) #director
# o.short_description = "korte beschrijving"
# o.genre = ""
# from sqlalchemy.orm.session import Session
# Session.object_session(o).flush([o])
#
# post( makeImport )
def selectedTableIndexes( self ):
"""returns a list of selected rows indexes"""
return self.table.selectedIndexes()
def getColumns( self ):
"""return the columns to be displayed in the table view"""
return self.admin.get_columns()
def getData( self ):
"""generator for data queried by table model"""
for d in self._table_model.getData():
yield d
def getTitle( self ):
"""return the name of the entity managed by the admin attribute"""
return self.admin.get_verbose_name()
def viewFirst( self ):
"""selects first row"""
self.selectTableRow( 0 )
def viewLast( self ):
"""selects last row"""
self.selectTableRow( self._table_model.rowCount() - 1 )
def viewNext( self ):
"""selects next row"""
first = self.selectedTableIndexes()[0]
next = ( first.row() + 1 ) % self._table_model.rowCount()
self.selectTableRow( next )
def viewPrevious( self ):
"""selects previous row"""
first = self.selectedTableIndexes()[0]
prev = ( first.row() - 1 ) % self._table_model.rowCount()
self.selectTableRow( prev )
@QtCore.pyqtSlot(object)
def _set_query(self, query_getter):
if isinstance(self._table_model, QueryTableProxy):
self._table_model.setQuery(query_getter)
self.table.clearSelection()
@QtCore.pyqtSlot()
def refresh(self):
"""Refresh the whole view"""
post( self.get_admin, self.set_admin )
@QtCore.pyqtSlot()
def rebuild_query( self ):
"""resets the table model query"""
from filterlist import FilterList
def rebuild_query():
query = self.admin.get_query()
# a table view is not required to have a header
if self.header:
query = self.header.decorate_query(query)
filters = self.findChild(FilterList, 'filters')
if filters:
query = filters.decorate_query( query )
if self.search_filter:
query = self.search_filter( query )
query_getter = lambda:query
return query_getter
post( rebuild_query, self._set_query )
@QtCore.pyqtSlot(str)
def startSearch( self, text ):
"""rebuilds query based on filtering text"""
from camelot.view.search import create_entity_search_query_decorator
logger.debug( 'search %s' % text )
self.search_filter = create_entity_search_query_decorator( self.admin, unicode(text) )
self.rebuild_query()
@QtCore.pyqtSlot()
def cancelSearch( self ):
"""resets search filtering to default"""
logger.debug( 'cancel search' )
self.search_filter = lambda q: q
self.rebuild_query()
@model_function
def get_selection(self):
""":return: a list with all the objects corresponding to the selected rows in the
table """
selection = []
for row in set( map( lambda x: x.row(), self.table.selectedIndexes() ) ):
selection.append( self._table_model._get_object(row) )
return selection
@model_function
def get_collection(self):
""":return: a list with all the objects corresponding to the rows in the table
"""
return self._table_model.get_collection()
@QtCore.pyqtSlot(tuple)
@gui_function
def set_filters_and_actions( self, filters_and_actions ):
"""sets filters for the tableview"""
filters, actions = filters_and_actions
from camelot.view.controls.filterlist import FilterList
from camelot.view.controls.actionsbox import ActionsBox
logger.debug( 'setting filters for tableview' )
filters_widget = self.findChild(FilterList, 'filters')
if filters_widget:
filters_widget.filters_changed_signal.disconnect( self.rebuild_query )
self.filters_layout.removeWidget(filters_widget)
filters_widget.deleteLater()
if self.actions:
self.filters_layout.removeWidget(self.actions)
self.actions.deleteLater()
self.actions = None
if filters:
splitter = self.findChild( QtGui.QWidget, 'splitter' )
filters_widget = FilterList( filters, parent=splitter )
filters_widget.setObjectName('filters')
self.filters_layout.addWidget( filters_widget )
filters_widget.filters_changed_signal.connect( self.rebuild_query )
#
# filters might have default values, so we can only build the queries now
#
self.rebuild_query()
if actions:
#
# Attention, the ActionBox should only contain a reference to the
            # table, and not to the table model, since that would cause the
            # garbage collector to collect them both in random order, causing
            # segfaults (see the test_qt_bindings)
#
self.actions = ActionsBox( self,
self.get_collection,
self.get_selection )
self.actions.setActions( actions )
self.filters_layout.addWidget( self.actions )
def to_html( self ):
"""generates html of the table"""
if self._table_model:
query_getter = self._table_model.get_query_getter()
table = [[getattr( row, col[0] ) for col in self.admin.get_columns()]
for row in query_getter().all()]
context = {
'title': self.admin.get_verbose_name_plural(),
'table': table,
'columns': [field_attributes['name'] for _field, field_attributes in self.admin.get_columns()],
}
from camelot.view.templates import loader
from jinja2 import Environment
env = Environment( loader = loader )
tp = env.get_template( 'table_view.html' )
return tp.render( context )
def importFromFile( self ):
""""import data : the data will be imported in the activeMdiChild """
logger.info( 'call import method' )
from camelot.view.wizard.importwizard import ImportWizard
wizard = ImportWizard(self, self.admin)
wizard.exec_()
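# Illustrative only (hypothetical subclass and slot names, not part of Camelot):
# the class attributes documented on TableView are tuned by subclassing, and
# row_selected_signal can be connected like any other Qt signal, e.g.
#
#     class MovieRentalTableView( TableView ):
#         title_format = 'Grand overview of recent movie rentals'
#
#     view = MovieRentalTableView( movie_admin )
#     view.row_selected_signal.connect( open_movie_form )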
|
kurtraschke/camelot
|
camelot/view/controls/tableview.py
|
Python
|
gpl-2.0
| 31,637
|
[
"VisIt"
] |
7ab3b4060be0d462866befa1e7ea8bcfb8803152ceb086cd9a87bce5ad7e99d9
|
# -*- coding: utf-8 -*-
from . import *
from . fixtures import *
import os
from flanker import mime
from talon import quotations
@patch.object(quotations, 'MAX_LINES_COUNT', 1)
def test_too_many_lines():
msg_body = """Test reply
-----Original Message-----
Test"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote():
msg_body = """Test reply
On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote:
>
> Test
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote_date_with_slashes():
msg_body = """Test reply
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
> Test.
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote_allows_space_in_front():
msg_body = """Thanks Thanmai
On Mar 8, 2012 9:59 AM, "Example.com" <
r+7f1b094ceb90e18cca93d53d3703feae@example.com> wrote:
>**
> Blah-blah-blah"""
eq_("Thanks Thanmai", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_sent():
msg_body = """Test reply
On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> sent:
>
> Test
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_line_starts_with_on():
msg_body = """Blah-blah-blah
On blah-blah-blah"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_reply_and_quotation_splitter_share_line():
# reply lines and 'On <date> <person> wrote:' splitter pattern
# are on the same line
msg_body = """reply On Wed, Apr 4, 2012 at 3:59 PM, bob@example.com wrote:
> Hi"""
eq_('reply', quotations.extract_from_plain(msg_body))
# test pattern '--- On <date> <person> wrote:' with reply text on
# the same line
msg_body = """reply--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote:
> Hi"""
eq_('reply', quotations.extract_from_plain(msg_body))
# test pattern '--- On <date> <person> wrote:' with reply text containing
# '-' symbol
msg_body = """reply
bla-bla - bla--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote:
> Hi"""
reply = """reply
bla-bla - bla"""
eq_(reply, quotations.extract_from_plain(msg_body))
def _check_pattern_original_message(original_message_indicator):
msg_body = u"""Test reply
-----{}-----
Test"""
eq_('Test reply', quotations.extract_from_plain(msg_body.format(unicode(original_message_indicator))))
def test_english_original_message():
_check_pattern_original_message('Original Message')
_check_pattern_original_message('Reply Message')
def test_german_original_message():
_check_pattern_original_message(u'Ursprüngliche Nachricht')
_check_pattern_original_message('Antwort Nachricht')
def test_danish_original_message():
_check_pattern_original_message('Oprindelig meddelelse')
def test_reply_after_quotations():
msg_body = """On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
> Test
Test reply"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_reply_wraps_quotations():
msg_body = """Test reply
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
> Test
Regards, Roman"""
reply = """Test reply
Regards, Roman"""
eq_(reply, quotations.extract_from_plain(msg_body))
def test_reply_wraps_nested_quotations():
msg_body = """Test reply
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>Test test
>On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
>>
>> Test.
>>
>> Roman
Regards, Roman"""
reply = """Test reply
Regards, Roman"""
eq_(reply, quotations.extract_from_plain(msg_body))
def test_quotation_separator_takes_2_lines():
msg_body = """Test reply
On Fri, May 6, 2011 at 6:03 PM, Roman Tkachenko from Hacker News
<roman@definebox.com> wrote:
> Test.
>
> Roman
Regards, Roman"""
reply = """Test reply
Regards, Roman"""
eq_(reply, quotations.extract_from_plain(msg_body))
def test_quotation_separator_takes_3_lines():
msg_body = """Test reply
On Nov 30, 2011, at 12:47 PM, Somebody <
416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4@somebody.domain.com>
wrote:
Test message
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_short_quotation():
msg_body = """Hi
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
> Hello"""
eq_("Hi", quotations.extract_from_plain(msg_body))
def test_pattern_date_email_with_unicode():
msg_body = """Replying ok
2011/4/7 Nathan \xd0\xb8ova <support@example.com>
> Cool beans, scro"""
eq_("Replying ok", quotations.extract_from_plain(msg_body))
def test_english_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain("""Allo! Follow up MIME!
From: somebody@example.com
Sent: March-19-11 5:42 PM
To: Somebody
Subject: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_german_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain(
"""Allo! Follow up MIME!
Von: somebody@example.com
Gesendet: Dienstag, 25. November 2014 14:59
An: Somebody
Betreff: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_danish_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain(
"""Allo! Follow up MIME!
Fra: somebody@example.com
Sendt: 19. march 2011 12:10
Til: Somebody
Emne: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_quotation_marker_false_positive():
msg_body = """Visit us now for assistance...
>>> >>> http://www.domain.com <<<
Visit our site by clicking the link above"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_link_closed_with_quotation_marker_on_new_line():
msg_body = '''8.45am-1pm
From: somebody@example.com
<http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT
> <bob@example.com <mailto:bob@example.com> >
Requester: '''
eq_('8.45am-1pm', quotations.extract_from_plain(msg_body))
def test_link_breaks_quotation_markers_sequence():
# link starts and ends on the same line
msg_body = """Blah
On Thursday, October 25, 2012 at 3:03 PM, life is short. on Bob wrote:
>
> Post a response by replying to this email
>
(http://example.com/c/YzOTYzMmE) >
> life is short. (http://example.com/c/YzMmE)
>
"""
eq_("Blah", quotations.extract_from_plain(msg_body))
# link starts after some text on one line and ends on another
msg_body = """Blah
On Monday, 24 September, 2012 at 3:46 PM, bob wrote:
> [Ticket #50] test from bob
>
> View ticket (http://example.com/action
_nonce=3dd518)
>
"""
eq_("Blah", quotations.extract_from_plain(msg_body))
def test_from_block_starts_with_date():
msg_body = """Blah
Date: Wed, 16 May 2012 00:15:02 -0600
To: klizhentas@example.com"""
eq_('Blah', quotations.extract_from_plain(msg_body))
def test_bold_from_block():
msg_body = """Hi
*From:* bob@example.com [mailto:
bob@example.com]
*Sent:* Wednesday, June 27, 2012 3:05 PM
*To:* travis@example.com
*Subject:* Hello
"""
eq_("Hi", quotations.extract_from_plain(msg_body))
def test_weird_date_format_in_date_block():
msg_body = """Blah
Date: Fri=2C 28 Sep 2012 10:55:48 +0000
From: tickets@example.com
To: bob@example.com
Subject: [Ticket #8] Test
"""
eq_('Blah', quotations.extract_from_plain(msg_body))
def test_dont_parse_quotations_for_forwarded_messages():
msg_body = """FYI
---------- Forwarded message ----------
From: bob@example.com
Date: Tue, Sep 4, 2012 at 1:35 PM
Subject: Two
line subject
To: rob@example.com
Text"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_forwarded_message_in_quotations():
msg_body = """Blah
-----Original Message-----
FYI
---------- Forwarded message ----------
From: bob@example.com
Date: Tue, Sep 4, 2012 at 1:35 PM
Subject: Two
line subject
To: rob@example.com
"""
eq_("Blah", quotations.extract_from_plain(msg_body))
def test_mark_message_lines():
# e - empty line
# s - splitter line
# m - line starting with quotation marker '>'
# t - the rest
lines = ['Hello', '',
# next line should be marked as splitter
'_____________',
'From: foo@bar.com',
'',
'> Hi',
'',
'Signature']
eq_('tessemet', quotations.mark_message_lines(lines))
lines = ['Just testing the email reply',
'',
'Robert J Samson',
'Sent from my iPhone',
'',
# all 3 next lines should be marked as splitters
'On Nov 30, 2011, at 12:47 PM, Skapture <',
('416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4'
'@skapture-staging.mailgun.org>'),
'wrote:',
'',
'Tarmo Lehtpuu has posted the following message on']
eq_('tettessset', quotations.mark_message_lines(lines))
def test_process_marked_lines():
# quotations and last message lines are mixed
# consider all to be a last message
markers = 'tsemmtetm'
lines = [str(i) for i in range(len(markers))]
eq_(lines, quotations.process_marked_lines(lines, markers))
# no splitter => no markers
markers = 'tmm'
lines = ['1', '2', '3']
eq_(['1', '2', '3'], quotations.process_marked_lines(lines, markers))
# text after splitter without markers is quotation
markers = 'tst'
lines = ['1', '2', '3']
eq_(['1'], quotations.process_marked_lines(lines, markers))
# message + quotation + signature
markers = 'tsmt'
lines = ['1', '2', '3', '4']
eq_(['1', '4'], quotations.process_marked_lines(lines, markers))
# message + <quotation without markers> + nested quotation
markers = 'tstsmt'
lines = ['1', '2', '3', '4', '5', '6']
eq_(['1'], quotations.process_marked_lines(lines, markers))
    # test links wrapped with parentheses
# link starts on the marker line
markers = 'tsmttem'
lines = ['text',
'splitter',
'>View (http://example.com',
'/abc',
')',
'',
'> quote']
eq_(lines[:1], quotations.process_marked_lines(lines, markers))
# link starts on the new line
markers = 'tmmmtm'
lines = ['text',
             '>',
             '>',
'>',
'(http://example.com) > ',
'> life is short. (http://example.com) '
]
eq_(lines[:1], quotations.process_marked_lines(lines, markers))
# check all "inline" replies
markers = 'tsmtmtm'
lines = ['text',
'splitter',
'>',
'(http://example.com)',
'>',
'inline reply',
'>']
eq_(lines, quotations.process_marked_lines(lines, markers))
    # inline reply with link not wrapped in parentheses
markers = 'tsmtm'
lines = ['text',
'splitter',
'>',
'inline reply with link http://example.com',
'>']
eq_(lines, quotations.process_marked_lines(lines, markers))
    # inline reply with link wrapped in parentheses
markers = 'tsmtm'
lines = ['text',
'splitter',
'>',
'inline reply (http://example.com)',
'>']
eq_(lines, quotations.process_marked_lines(lines, markers))
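def test_mark_message_lines_length():
    # Illustrative addition (not part of the upstream suite): mark_message_lines
    # returns exactly one marker character per input line, which is what
    # process_marked_lines relies on above.
    lines = ['Hello', '', '> quoted', 'bye']
    eq_(len(lines), len(quotations.mark_message_lines(lines)))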
def test_preprocess():
msg = ('Hello\n'
'See <http://google.com\n'
'> for more\n'
'information On Nov 30, 2011, at 12:47 PM, Somebody <\n'
'416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n'
'@example.com>'
'wrote:\n'
'\n'
'> Hi')
# test the link is rewritten
# 'On <date> <person> wrote:' pattern starts from a new line
prepared_msg = ('Hello\n'
'See @@http://google.com\n'
'@@ for more\n'
'information\n'
' On Nov 30, 2011, at 12:47 PM, Somebody <\n'
'416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n'
'@example.com>'
'wrote:\n'
'\n'
'> Hi')
eq_(prepared_msg, quotations.preprocess(msg, '\n'))
msg = """
> <http://teemcl.mailgun.org/u/**aD1mZmZiNGU5ODQwMDNkZWZlMTExNm**
> MxNjQ4Y2RmOTNlMCZyPXNlcmdleS5v**YnlraG92JTQwbWFpbGd1bmhxLmNvbS**
> Z0PSUyQSZkPWUwY2U<http://example.org/u/aD1mZmZiNGU5ODQwMDNkZWZlMTExNmMxNjQ4Y>
"""
eq_(msg, quotations.preprocess(msg, '\n'))
# 'On <date> <person> wrote' shouldn't be spread across too many lines
msg = ('Hello\n'
'How are you? On Nov 30, 2011, at 12:47 PM,\n '
'Example <\n'
'416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n'
'@example.org>'
'wrote:\n'
'\n'
'> Hi')
eq_(msg, quotations.preprocess(msg, '\n'))
msg = ('Hello On Nov 30, smb wrote:\n'
'Hi\n'
'On Nov 29, smb wrote:\n'
'hi')
prepared_msg = ('Hello\n'
' On Nov 30, smb wrote:\n'
'Hi\n'
'On Nov 29, smb wrote:\n'
'hi')
eq_(prepared_msg, quotations.preprocess(msg, '\n'))
def test_preprocess_postprocess_2_links():
msg_body = "<http://link1> <http://link2>"
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_standard_replies():
for filename in os.listdir(STANDARD_REPLIES):
filename = os.path.join(STANDARD_REPLIES, filename)
if os.path.isdir(filename):
continue
with open(filename) as f:
msg = f.read()
m = mime.from_string(msg)
for part in m.walk():
if part.content_type == 'text/plain':
text = part.body
stripped_text = quotations.extract_from_plain(text)
reply_text_fn = filename[:-4] + '_reply_text'
if os.path.isfile(reply_text_fn):
with open(reply_text_fn) as f:
reply_text = f.read()
else:
reply_text = 'Hello'
eq_(reply_text, stripped_text,
"'%(reply)s' != %(stripped)s for %(fn)s" %
{'reply': reply_text, 'stripped': stripped_text,
'fn': filename})
|
saleswise/talon
|
tests/text_quotations_test.py
|
Python
|
apache-2.0
| 14,470
|
[
"VisIt"
] |
0901269f9bb478b9fa1db71a00d8a5af0032f3a2b00b1b18b66f8248cbe2a728
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# cat - emulate the un*x command with the same name
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Emulate the un*x function with the same name"""
import os
import glob
import shared.returnvalues as returnvalues
from shared.base import client_id_dir
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.parseflags import verbose, binary
from shared.validstring import valid_user_path
def signature():
"""Signature of the main function"""
defaults = {'path': REJECT_UNSET, 'dst': [''], 'flags': ['']}
return ['file_output', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
client_dir = client_id_dir(client_id)
defaults = signature()[1]
status = returnvalues.OK
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
flags = ''.join(accepted['flags'])
patterns = accepted['path']
dst = accepted['dst'][-1]
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
if verbose(flags):
for flag in flags:
output_objects.append({'object_type': 'text', 'text'
: '%s using flag: %s' % (op_name,
flag)})
if dst:
dst_mode = "wb"
real_dst = os.path.join(base_dir, dst)
relative_dst = real_dst.replace(base_dir, '')
if not valid_user_path(real_dst, base_dir, True):
logger.warning('%s tried to %s into restricted path %s ! (%s)'
% (client_id, op_name, real_dst, dst))
output_objects.append({'object_type': 'error_text',
'text': "invalid destination: '%s'" % \
dst})
return (output_objects, returnvalues.CLIENT_ERROR)
for pattern in patterns:
# Check directory traversal attempts before actual handling to avoid
# leaking information about file system layout while allowing
# consistent error messages
unfiltered_match = glob.glob(base_dir + pattern)
match = []
for server_path in unfiltered_match:
real_path = os.path.abspath(server_path)
if not valid_user_path(real_path, base_dir, True):
# out of bounds - save user warning for later to allow
# partial match:
# ../*/* is technically allowed to match own files.
logger.warning('%s tried to %s restricted path %s ! (%s)'
% (client_id, op_name, real_path, pattern))
continue
match.append(real_path)
# Now actually treat list of allowed matchings and notify if no
# (allowed) match
if not match:
output_objects.append({'object_type': 'file_not_found',
'name': pattern})
status = returnvalues.FILE_NOT_FOUND
for real_path in match:
output_lines = []
relative_path = real_path.replace(base_dir, '')
try:
fd = open(real_path, 'r')
# use file directly as iterator for efficiency
for line in fd:
output_lines.append(line)
fd.close()
except Exception, exc:
output_objects.append({'object_type': 'error_text',
'text': "%s: '%s': %s" % (op_name,
relative_path, exc)})
logger.error("%s: failed on '%s': %s" % (op_name,
relative_path, exc))
status = returnvalues.SYSTEM_ERROR
continue
if dst:
try:
out_fd = open(real_dst, dst_mode)
out_fd.writelines(output_lines)
out_fd.close()
except Exception, exc:
output_objects.append({'object_type': 'error_text',
'text': "write failed: '%s'" % exc})
logger.error("%s: write failed on '%s': %s" % (op_name,
real_dst, exc))
status = returnvalues.SYSTEM_ERROR
continue
output_objects.append({'object_type': 'text',
'text': "wrote %s to %s" % (relative_path,
relative_dst)})
# Prevent truncate after first write
dst_mode = "ab+"
else:
entry = {'object_type': 'file_output',
'lines': output_lines,
'wrap_binary': binary(flags),
'wrap_targets': ['lines']}
if verbose(flags):
entry['path'] = relative_path
output_objects.append(entry)
# TODO: rip this hack out into real download handler?
# Force download of files when output_format == 'file'
# This will only work for the first file matching a glob when
# using the 'file' output format, and it is only meant to work for
# one file.
if user_arguments_dict.has_key('output_format'):
output_format = user_arguments_dict['output_format'][0]
if output_format == 'file':
output_objects.append(
{'object_type': 'start', 'headers':
[('Content-Disposition',
'attachment; filename="%s";' % \
os.path.basename(real_path))]})
return (output_objects, status)
|
heromod/migrid
|
mig/shared/functionality/cat.py
|
Python
|
gpl-2.0
| 7,229
|
[
"Brian"
] |
21445310738f0ea75ee31b91af2ddc81665fe9489e083c2f3211e091c48a028a
|
# rfc1751.py : Converts between 128-bit strings and a human-readable
# sequence of words, as defined in RFC1751: "A Convention for
# Human-Readable 128-bit Keys", by Daniel L. McDonald.
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew M. Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import binascii
from Cryptodome.Util.py3compat import *
from functools import reduce
binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101',
6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011',
12:'1100', 13:'1101', 14:'1110', 15:'1111'}
def _key2bin(s):
"Convert a key into a string of binary digits"
kl=[bord(x) for x in s]
kl=[binary[x>>4]+binary[x&15] for x in kl]
return ''.join(kl)
def _extract(key, start, length):
"""Extract a bitstring(2.x)/bytestring(2.x) from a string of binary digits, and return its
numeric value."""
k=key[start:start+length]
return reduce(lambda x,y: x*2+ord(y)-48, k, 0)
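# Illustrative note (editor's addition, not part of the upstream module): the
# helper above reads a run of '0'/'1' characters as an unsigned integer, e.g.
# _extract('101101', 0, 3) == 5 and _extract('101101', 2, 4) == 13.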
def key_to_english (key):
"""key_to_english(key:string(2.x)/bytes(3.x)) : string
Transform an arbitrary key into a string containing English words.
The key length must be a multiple of 8.
"""
english=''
for index in range(0, len(key), 8): # Loop over 8-byte subkeys
subkey=key[index:index+8]
# Compute the parity of the key
skbin=_key2bin(subkey) ; p=0
for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
# Append parity bits to the subkey
skbin=_key2bin(subkey+bchr((p<<6) & 255))
for i in range(0, 64, 11):
english=english+wordlist[_extract(skbin, i, 11)]+' '
return english[:-1] # Remove the trailing space
def english_to_key (s):
"""english_to_key(string):string(2.x)/bytes(2.x)
Transform a string into a corresponding key.
The string must contain words separated by whitespace; the number
of words must be a multiple of 6.
"""
L=s.upper().split() ; key=b('')
for index in range(0, len(L), 6):
sublist=L[index:index+6] ; char=9*[0] ; bits=0
for i in sublist:
index = wordlist.index(i)
shift = (8-(bits+11)%8) %8
y = index << shift
cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff
if (shift>5):
char[bits>>3] = char[bits>>3] | cl
char[(bits>>3)+1] = char[(bits>>3)+1] | cc
char[(bits>>3)+2] = char[(bits>>3)+2] | cr
elif shift>-3:
char[bits>>3] = char[bits>>3] | cc
char[(bits>>3)+1] = char[(bits>>3)+1] | cr
else: char[bits>>3] = char[bits>>3] | cr
bits=bits+11
subkey=reduce(lambda x,y:x+bchr(y), char, b(''))
# Check the parity of the resulting key
skbin=_key2bin(subkey)
p=0
for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
if (p&3) != _extract(skbin, 64, 2):
raise ValueError("Parity error in resulting key")
key=key+subkey[0:8]
return key
wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA",
"AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK",
"ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE",
"AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET",
"BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO",
"BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT",
"BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY",
"CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN",
"DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG",
"DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO",
"ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE",
"EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW",
"FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP",
"GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO",
"GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD",
"HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT",
"HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE",
"HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL",
"INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET",
"JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT",
"KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB",
"LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT",
"LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG",
"LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW",
"MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG",
"MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED",
"NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD",
"NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL",
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT",
"OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD",
"PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT",
"PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB",
"PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT",
"RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB",
"RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM",
"SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET",
"SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY",
"SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN",
"TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE",
"TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP",
"US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS",
"WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT",
"WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT",
"ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS",
"ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE",
"AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA",
"ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN",
"AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW",
"ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA",
"ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM",
"AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW",
"AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL",
"BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM",
"BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK",
"BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH",
"BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT",
"BEAU", "BECK", "BEEF", "BEEN", "BEER",
"BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN",
"BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE",
"BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE",
"BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT",
"BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK",
"BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT",
"BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK",
"BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS",
"BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN",
"BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD",
"BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG",
"BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST",
"BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF",
"CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL",
"CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL",
"CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF",
"CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG",
"CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY",
"CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA",
"COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN",
"COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK",
"COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST",
"COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB",
"CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY",
"CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE",
"DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN",
"DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS",
"DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED",
"DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK",
"DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT",
"DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES",
"DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA",
"DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG",
"DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK",
"DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK",
"DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST",
"EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT",
"EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT",
"EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED",
"FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL",
"FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT",
"FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST",
"FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE",
"FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE",
"FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW",
"FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM",
"FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL",
"FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL",
"FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY",
"FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY",
"FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA",
"GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH",
"GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE",
"GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT",
"GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN",
"GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD",
"GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG",
"GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB",
"GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN",
"GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH",
"GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR",
"HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK",
"HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE",
"HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR",
"HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL",
"HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN",
"HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT",
"HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE",
"HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK",
"HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL",
"HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK",
"HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE",
"HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH",
"INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE",
"ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE",
"JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL",
"JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN",
"JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY",
"JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST",
"JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL",
"KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL",
"KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW",
"KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD",
"KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN",
"LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD",
"LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS",
"LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER",
"LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST",
"LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU",
"LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB",
"LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST",
"LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE",
"LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD",
"LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK",
"LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE",
"LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE",
"MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI",
"MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK",
"MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE",
"MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK",
"MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH",
"MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT",
"MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS",
"MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD",
"MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON",
"MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH",
"MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK",
"MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL",
"NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR",
"NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS",
"NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA",
"NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON",
"NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB",
"OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY",
"OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE",
"ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS",
"OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY",
"OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT",
"RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE",
"RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR",
"RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA",
"REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT",
"RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD",
"ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME",
"ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS",
"ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY",
"RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE",
"RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE",
"SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE",
"SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR",
"SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK",
"SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS",
"SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN",
"SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE",
"SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE",
"SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW",
"SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY",
"SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT",
"SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB",
"SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA",
"SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE",
"SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR",
"STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH",
"SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF",
"SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM",
"TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK",
"TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM",
"TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS",
"TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN",
"THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER",
"TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY",
"TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG",
"TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR",
"TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG",
"TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE",
"TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK",
"TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER",
"USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST",
"VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY",
"VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE",
"WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK",
"WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM",
"WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY",
"WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR",
"WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM",
"WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE",
"WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE",
"WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD",
"WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE",
"YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR",
"YELL", "YOGA", "YOKE" ]
if __name__=='__main__':
data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
('CCAC2AED591056BE4F90FD441C534766',
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
('EFF81F9BFBC65350920CDD7416DE8009',
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
]
for key, words in data:
print('Trying key', key)
key=binascii.a2b_hex(key)
w2=key_to_english(key)
if w2!=words:
print('key_to_english fails on key', repr(key), ', producing', str(w2))
k2=english_to_key(words)
if k2!=key:
print('english_to_key fails on key', repr(key), ', producing', repr(k2))
|
Haynie-Research-and-Development/jarvis
|
deps/lib/python3.4/site-packages/Cryptodome/Util/RFC1751.py
|
Python
|
gpl-2.0
| 21,212
|
[
"Elk",
"MOE"
] |
55597f9ae8b3313ae537cb5fe4321598c235a606829d8f3f202a346dde34229f
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Match up intersecting intervals from two files. This performs a "full join":
any pair of intervals with any basewise overlap will be printed side-by-side.
usage: %prog bed1 bed2
"""
from __future__ import division
import psyco_full
import string
import sys
import bx.intervals.io
import bx.intervals.intersection
def main():
intersecters = {}
# Read second set into intersecter
for interval in bx.intervals.io.GenomicIntervalReader( open( sys.argv[2] ) ):
if not intersecters.has_key( interval.chrom ):
intersecters[ interval.chrom ] = bx.intervals.Intersecter()
intersecters[ interval.chrom ].add_interval( interval )
# Join with first set
for interval in bx.intervals.io.GenomicIntervalReader( open( sys.argv[1] ) ):
if intersecters.has_key( interval.chrom ):
intersection = intersecters[ interval.chrom ].find( interval.start, interval.end )
for interval2 in intersection:
print "\t".join( [ str( interval ), str( interval2 ) ] )
if __name__ == "__main__":
main()
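# Usage sketch (editor's addition; file names are illustrative): the two
# positional arguments are interval (BED-like) files, e.g.
#     python interval_join.py regions_a.bed regions_b.bed > joined.txt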
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/interval_join.py
|
Python
|
bsd-3-clause
| 1,129
|
[
"Galaxy"
] |
264a6dbadf78f952f5731a6f1b5b47c01932bb81097b3f7ecdc6129a043ab5ed
|
'''Term conversion.'''
from aterm import visitor
class _ToInt(visitor.Visitor):
def visitTerm(self, term):
raise TypeError('not an integer term', term)
def visitInt(self, term):
return term.value
def toInt(term):
'''Convert an integer term to its integer value.'''
return _ToInt().visit(term)
class _ToReal(visitor.Visitor):
def visitTerm(self, term):
raise TypeError('not a real term', term)
def visitReal(self, term):
return term.value
def toReal(term):
'''Convert a real term to its real value.'''
return _ToReal().visit(term)
class _ToStr(visitor.Visitor):
def visitTerm(self, term):
raise TypeError('not a string term', term)
def visitStr(self, term):
return term.value
def toStr(term):
'''Convert a string term to its string value.'''
return _ToStr().visit(term)
class _ToLit(visitor.Visitor):
def visitTerm(self, term):
raise TypeError('not a literal term', term)
def visitLit(self, term):
return term.value
def toLit(term):
'''Convert a literal term to its value.'''
return _ToLit().visit(term)
class _ToList(visitor.Visitor):
def visitTerm(self, term):
raise TypeError('not a list term', term)
def visitNil(self, term):
return []
def visitCons(self, term):
head = term.head
tail = self.visit(term.tail)
return [head] + tail
def toList(term):
'''Convert a list term to a list of terms.'''
return _ToList().visit(term)
class _ToObj(visitor.Visitor):
def visitTerm(self, term):
raise TypeError('term not convertible', term)
def visitLit(self, term):
return term.value
def visitNil(self, term):
return []
def visitCons(self, term):
head = self.visit(term.head)
tail = self.visit(term.tail)
return [head] + tail
# def visitAppl(self, term):
# # return application terms unmodified
# return term
def toObj(term):
'''Recursively convert literal and list terms to the corresponding
Python objects.'''
return _ToObj().visit(term)
|
mewbak/idc
|
aterm/convert.py
|
Python
|
lgpl-2.1
| 1,934
|
[
"VisIt"
] |
a6fb0983eadd00f5d9f55c5f3ab38bf78442a41c4917e850455980b5d71e5551
|
# Copyright 2005 by Jonathan Taylor.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This module deals with CAPS markers.
A CAPS marker is a location a DifferentialCutsite as described below and a
set of primers that can be used to visualize this. More information can
be found in the paper `Konieczny and Ausubel (1993)`_ (PMID 8106085).
.. _`Konieczny and Ausubel (1993)`: http://dx.doi.org/10.1046/j.1365-313X.1993.04020403.x
"""
class DifferentialCutsite(object):
"""Differential enzyme cutsite in an alignment.
A differential cutsite is a location in an alignment where an enzyme cuts
at least one sequence and also cannot cut at least one other sequence.
Members:
- start - Where it lives in the alignment.
- enzyme - The enzyme that causes this.
- cuts_in - A list of sequences (as indexes into the alignment) the
enzyme cuts in.
- blocked_in - A list of sequences (as indexes into the alignment) the
enzyme is blocked in.
"""
def __init__(self, **kwds):
"""Initialize a DifferentialCutsite.
Each member (as listed in the class description) should be included as a
keyword.
"""
self.start = int(kwds["start"])
self.enzyme = kwds["enzyme"]
self.cuts_in = kwds["cuts_in"]
self.blocked_in = kwds["blocked_in"]
class AlignmentHasDifferentLengthsError(Exception):
pass
class CAPSMap(object):
"""A map of an alignment showing all possible dcuts.
Members:
- alignment - The alignment that is mapped.
- dcuts - A list of possible CAPS markers in the form of
DifferentialCutsites.
"""
def __init__(self, alignment, enzymes=None):
"""Initialize the CAPSMap.
Required:
- alignment - The alignment to be mapped.
Optional:
- enzymes - List of enzymes to be used to create the map.
Defaults to an empty list.
"""
if enzymes is None:
enzymes = []
self.sequences = [rec.seq for rec in alignment]
self.size = len(self.sequences)
self.length = len(self.sequences[0])
for seq in self.sequences:
if len(seq) != self.length:
raise AlignmentHasDifferentLengthsError
self.alignment = alignment
self.enzymes = enzymes
# look for dcuts
self._digest()
def _digest_with(self, enzyme):
cuts = [] # list of lists, one per sequence
all = []
# go through each sequence
for seq in self.sequences:
# grab all the cuts in the sequence
seq_cuts = [cut - enzyme.fst5 for cut in enzyme.search(seq)]
# maintain a list of all cuts in all sequences
all.extend(seq_cuts)
cuts.append(seq_cuts)
# we sort the all list and remove duplicates
all.sort()
last = -999
new = []
for cut in all:
if cut != last:
new.append(cut)
last = cut
all = new
# all now has indices for all sequences in the alignment
for cut in all:
# test for dcuts
cuts_in = []
blocked_in = []
for i in range(0, self.size):
seq = self.sequences[i]
if cut in cuts[i]:
cuts_in.append(i)
else:
blocked_in.append(i)
if cuts_in != [] and blocked_in != []:
self.dcuts.append(DifferentialCutsite(start=cut,
enzyme=enzyme,
cuts_in=cuts_in,
blocked_in=blocked_in))
def _digest(self):
self.dcuts = []
for enzyme in self.enzymes:
self._digest_with(enzyme)
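# Minimal usage sketch (editor's addition; the alignment and enzyme names are
# illustrative assumptions, not part of this module): given a Biopython
# alignment `aln` and enzymes from Bio.Restriction, a map could be built as
#
#     from Bio.Restriction import EcoRI, AluI
#     caps_map = CAPSMap(aln, enzymes=[EcoRI, AluI])
#     for dcut in caps_map.dcuts:
#         print(dcut.start, dcut.enzyme, dcut.cuts_in, dcut.blocked_in)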
|
zjuchenyuan/BioWeb
|
Lib/Bio/CAPS/__init__.py
|
Python
|
mit
| 4,043
|
[
"Biopython"
] |
71d2fe13247a88b7ee51a983bdbafa434bccff47a2fd83217eb1c183713c732c
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Vladimír Slávik 2010 - 2011
# Python 2.6, 3.1
#
# for Simutrans
# http://www.simutrans.com
#
# code is public domain
#
# pygame learning script - tool for reformatting trees using offsets
# very rudimentary but sufficient :-)
from __future__ import print_function
import os, math, sys
import simutools
#-----
Data = []
paksize = 128
outdir = "dump"
loadedimages = {}
#-----
def procObj(obj) :
objname = obj.ask("name")
cname = simutools.canonicalObjName(objname)
images = []
for param in simutools.ImageParameterNames :
images += map(lambda i: (param, i[0], i[1]), obj.ask_indexed(param))
for param in simutools.SingleImageParameterNames :
value = obj.ask(param)
if value :
images.append((param, "", value))
newimage = pygame.Surface((paksize, paksize))
newimage.fill((231,255,255)) # background
for i in range(len(images)) :
key, indices, raw_ref = images[i]
imgref = simutools.SimutransImgParam(raw_ref)
if not imgref.isEmpty() :
imgname = os.path.normpath(os.path.join(os.path.dirname(obj.srcfile), imgref.file + ".png"))
if not imgname in loadedimages :
loadedimages[imgname] = pygame.image.load(imgname)
srccoords_px = pygame.Rect(imgref.coords[1] * paksize, \
imgref.coords[0] * paksize, paksize, paksize) # calculate position on old image
newimage.blit(loadedimages[imgname], (0,0), srccoords_px) # copy image tile
off_x, off_y = imgref.offset
off_x, off_y = -off_x, -off_y
changed = True
save = False
while True :
pygame.time.wait(50)
pygame.event.pump()
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE] or keys[pygame.K_ESCAPE] :
pygame.time.wait(500)
break
elif keys[pygame.K_RETURN] :
pygame.time.wait(500)
save = True
break
elif keys[pygame.K_LEFT] :
off_x = off_x - 1
changed = True
elif keys[pygame.K_RIGHT] :
off_x = off_x + 1
changed = True
elif keys[pygame.K_UP] :
off_y = off_y - 1
changed = True
elif keys[pygame.K_DOWN] :
off_y = off_y + 1
changed = True
elif pygame.mouse.get_pressed()[0] :
mx, my = pygame.mouse.get_pos()
off_x = mx - 100 - paksize / 2
off_y = my - 100 - (paksize * 3) / 4
changed = True
if changed :
screen.fill((0,0,0))
screen.blit(newimage, (100,100))
screen.blit(cursor, (100 + off_x, 100 + off_y), newimage.get_bounding_rect())
pygame.display.flip()
changed = False
if save :
imgref.offset = -off_x, -off_y
obj.put(key + indices, str(imgref))
f = open(os.path.join(outdir, cname + ".dat"), 'w')
for l in obj.lines :
f.write(l)
f.close()
#-----
# main() is this piece of code
try :
import pygame
except ImportError :
print("This script needs PyGame to work!")
print("Visit http://www.pygame.org to get it.")
else :
pygame.display.init()
simutools.walkFiles(os.getcwd(), simutools.loadFile, cbparam=Data)
simutools.pruneList(Data)
simutools.pruneObjs(Data, ["tree"])
if not os.path.exists(outdir) :
os.mkdir(outdir)
screen = pygame.display.set_mode((200 + paksize, 200 + paksize))
cursor = pygame.image.load("targeting.png")
cursor.set_colorkey((255,255,255))
for item in Data :
procObj(item)
#-----
# EOF
|
simutrans/pak128
|
tools/tree-align-interactive.py
|
Python
|
artistic-2.0
| 3,342
|
[
"VisIt"
] |
23b50db106eeb88af97a9d56845cda6bfe1cfff4693c170c0c56b172e26f1109
|
import unittest
import nose
from nose.tools import eq_, ok_
import os, sys
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
sys.path = [os.path.join(PATH_HERE, '..')] + sys.path
import readMDA
from xmap_netcdf_reader import DetectorData
TESTDATA_DIR = os.path.join(PATH_HERE, '..', '..', 'test_data', '2013-07-26_mapping_mode')
MDA_FILE = 'SR12ID01H22707.mda'
NETCDF_DIR = os.path.join(TESTDATA_DIR, 'out_1374804236')
NETCDF_PATTERN = 'ioc5[3-4]_([0-9]*)\.nc'
''' The structure of test_data.xml is specifically built to test functionality of the
code. Changes to test_data.xml will cause failures in these tests.
'''
class DatasetLoadingTest(unittest.TestCase):
def simple_load_test(self):
fname = os.path.join(TESTDATA_DIR, MDA_FILE)
mda = readMDA.readMDA(fname, verbose=False)
self.assertEqual(mda[0]['rank'], 1)
class DetectorTest(unittest.TestCase):
def setUp(self):
self.d = DetectorData(
shape = (10,10),
pixelsteps_per_buffer = 1,
buffers_per_file = 1,
dirpaths = NETCDF_DIR,
filepattern = NETCDF_PATTERN,
mca_bins = 2048,
first_file_n = 1,
)
def get_all_file_groups_test(self):
fg_dict = self.d._get_all_file_groups()
self.assertTrue(not fg_dict[0]) # 0: entry doesn't exist
self.assertEqual(len(fg_dict[1]), 2) # 1: group has two paths
def get_all_file_groups_len_test(self):
fg_dict = self.d._get_all_file_groups()
self.assertEqual(len(fg_dict), 539) # 539 netCDF pairs
def get_step0_file_paths_test(self):
paths = self.d._get_file_paths_for_pixel_step(pixel_step=0)
self.assertEqual(len(paths), 2)
paths = self.d._get_file_paths_for_pixel_step(pixel_step=538)
self.assertEqual(len(paths), 2)
paths = self.d._get_file_paths_for_pixel_step(pixel_step=539)
self.assertEqual(len(paths), 0)
def get_file_paths_for_all_pixel_steps_test(self):
fg_dict = self.d._get_file_paths_for_all_pixel_steps()
self.assertEqual(len(fg_dict), 539)
self.assertEqual(len(fg_dict[1]), 2)
def build_reverse_file_lookup_test(self):
fg_dict = self.d._get_file_paths_for_all_pixel_steps()
d = self.d._build_reverse_file_lookup(fg_dict)
self.assertEqual(d['ioc53_1.nc'], [0])
self.assertEqual(d['ioc54_1.nc'], [0])
self.assertEqual(d['ioc53_539.nc'], [538])
self.assertEqual(d['ioc54_539.nc'], [538])
def reverse_file_lookup_test(self):
files = self.d._get_file_paths_for_all_pixel_steps()
reverse_dict = self.d._build_reverse_file_lookup(files)
self.assertEqual(len(reverse_dict), 539*2)
self.assertEqual(reverse_dict['ioc53_1.nc'][0], 0)
self.assertEqual(reverse_dict['ioc54_1.nc'][0], 0)
self.assertEqual(reverse_dict['ioc53_539.nc'][0], 538)
def get_data_location_test(self):
tests = [
[(0, 0, 0), (0, 0, 0)],
[(0, 5, 1), (0, 12, 3)],
[(0, 5, 2), (0, 0, 0)],
[(0, 9, 9), (0, 11, 3)],
]
for input, output in tests:
pixel_step, row, col = input
path, buffer_ix, module_ix, channel = self.d._get_data_location(pixel_step, row, col)
self.assertTrue(isinstance(path, basestring))
self.assertEqual((buffer_ix, module_ix, channel), output)
def enumerate_all_data_indices_in_file_test(self):
tests = [
# filename, no. of elements in file,
# first and last (pixel_step, row, col, channel, buffer_ix, module_ix)
['ioc53_1.nc', 52, ( 0, 0, 0, 0, 0, 0), ( 0, 5, 1, 3, 0, 12)],
['ioc54_1.nc', 48, ( 0, 5, 2, 0, 0, 0), ( 0, 9, 9, 3, 0, 11)],
['ioc53_539.nc', 52, (538, 0, 0, 0, 0, 0), (538, 5, 1, 3, 0, 12)],
]
for filename, el, first, last in tests:
d = self.d._enumerate_all_data_indices_in_file(filename)
self.assertEqual(len(d), el)
self.assertEqual(d[0], first)
self.assertEqual(d[-1], last)
if __name__ == '__main__':
nose.run(defaultTest=__name__)
|
Peter--K/Sakura
|
tests/file_tests.py
|
Python
|
bsd-3-clause
| 4,203
|
[
"NetCDF"
] |
e579523d269e1896b06c4c6943648568dfcb242ecde9338a957f0e84dbbfb281
|
""" JobRunningWaitingRatioPolicy
Policy that calculates the efficiency following the formula::
( running ) / ( running + waiting + staging )
if the denominator is smaller than 10, it does not take any decision.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
from DIRAC.WorkloadManagementSystem.Client import JobStatus
__RCSID__ = '$Id$'
class JobRunningWaitingRatioPolicy(PolicyBase):
"""
The JobRunningWaitingRatioPolicy class is a policy that checks the efficiency of the
jobs according to what is on JobDB.
Evaluates the JobRunningWaitingRatioPolicy results given by the JobCommand.JobCommand
"""
@staticmethod
def _evaluate(commandResult):
""" _evaluate
efficiency <= 0.4 :: Banned
efficiency <= 0.65 :: Degraded
"""
result = {
'Status': None,
'Reason': None
}
if not commandResult['OK']:
result['Status'] = 'Error'
result['Reason'] = commandResult['Message']
return S_OK(result)
commandResult = commandResult['Value']
if not commandResult:
result['Status'] = 'Unknown'
result['Reason'] = 'No values to take a decision'
return S_OK(result)
commandResult = commandResult[0]
if not commandResult:
result['Status'] = 'Unknown'
result['Reason'] = 'No values to take a decision'
return S_OK(result)
running = commandResult[JobStatus.RUNNING]
waiting = commandResult[JobStatus.WAITING]
staging = commandResult[JobStatus.STAGING]
total = running + waiting + staging
# we want a minimum amount of jobs to take a decision (at least 10 jobs)
if total < 10:
result['Status'] = 'Unknown'
result['Reason'] = 'Not enough jobs to take a decision'
return S_OK(result)
efficiency = running / total
if efficiency <= 0.4:
result['Status'] = 'Banned'
elif efficiency <= 0.65:
result['Status'] = 'Degraded'
else:
result['Status'] = 'Active'
result['Reason'] = 'Job Running / Waiting ratio of %.2f' % efficiency
return S_OK(result)
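# Worked example (editor's addition, numbers are illustrative): with
# running=30, waiting=15 and staging=5 the total is 50 jobs, so a decision is
# taken; efficiency = 30 / 50 = 0.6, which is above 0.4 but not above 0.65,
# so the policy reports the 'Degraded' status.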
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Policy/JobRunningWaitingRatioPolicy.py
|
Python
|
gpl-3.0
| 2,224
|
[
"DIRAC"
] |
dfdf9bc3f7c255cf8d95a41ab92a18ca55857aacdacb02da8a0523204df7e988
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkProgrammableAttributeDataFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkProgrammableAttributeDataFilter(), 'Processing.',
('vtkDataSet',), ('vtkDataSet',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkProgrammableAttributeDataFilter.py
|
Python
|
bsd-3-clause
| 521
|
[
"VTK"
] |
4db147d43745ef040b077864889f98b6483a227833062c10712db126307d9b3d
|
import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
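# Editor's note: for a diagonal covariance the reference value above is the sum
# over features of univariate log-densities,
#   log N(x_j | m_j, s_j) = -0.5 * ((x_j - m_j) / s_j) ** 2 - np.log(s_j) - 0.5 * np.log(2 * np.pi),
# which the vectorized mixture.log_multivariate_normal_density is compared
# against in the tests below.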
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits do not do much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to trigger a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
if __name__ == '__main__':
import nose
nose.runmodule()
|
shikhardb/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
Python
|
bsd-3-clause
| 15,189
|
[
"Gaussian"
] |
0ac72a5928d5f872026b52a27dcb021d4483c9414dda595ef842cfe01ff4e3e5
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class MpasModel(MakefilePackage):
"""The Model for Prediction Across Scales (MPAS) is a collaborative
project for developing atmosphere, ocean and other earth-system
simulation components for use in climate, regional climate and weather
studies."""
homepage = "https://mpas-dev.github.io/"
url = "https://github.com/MPAS-Dev/MPAS-Model/archive/v7.0.tar.gz"
maintainers = ['t-brown']
version('7.1', sha256='9b5c181b7d0163ae33d24d7a79ede6990495134b58cf4500ba5c8c94192102bc')
version('7.0', sha256='f898ce257e66cff9e29320458870570e55721d16cb000de7f2cc27de7fdef14f')
version('6.3', sha256='e7f1d9ebfeb6ada37d42a286aaedb2e69335cbc857049dc5c5544bb51e7a8db8')
version('6.2', sha256='2a81825a62a468bf5c56ef9d9677aa2eb88acf78d4f996cb49a7db98b94a6b16')
depends_on('mpi')
depends_on('parallelio')
patch('makefile.patch', when='@7.0')
parallel = False
resource(when='@6.2:6.3',
name='MPAS-Data',
git='https://github.com/MPAS-Dev/MPAS-Data.git',
commit='33561790de8b43087ab850be833f51a4e605f1bb')
resource(when='@7.0:',
name='MPAS-Data',
git='https://github.com/MPAS-Dev/MPAS-Data.git',
tag='v7.0')
def target(self, model, action):
spec = self.spec
satisfies = spec.satisfies
fflags = [self.compiler.openmp_flag]
cppflags = ['-D_MPI']
if satisfies('%gcc'):
fflags.extend([
'-ffree-line-length-none',
'-fconvert=big-endian',
'-ffree-form',
'-fdefault-real-8',
'-fdefault-double-8',
])
cppflags.append('-DUNDERSCORE')
elif satisfies('%fj'):
fflags.extend([
'-Free',
'-Fwide',
'-CcdRR8',
])
elif satisfies('%intel'):
fflags.extend([
'-r8',
'-convert big_endian',
'-FR',
])
cppflags.append('-DUNDERSCORE')
targets = [
'FC_PARALLEL={0}'.format(spec['mpi'].mpifc),
'CC_PARALLEL={0}'.format(spec['mpi'].mpicc),
'CXX_PARALLEL={0}'.format(spec['mpi'].mpicxx),
'FC_SERIAL={0}'.format(spack_fc),
'CC_SERIAL={0}'.format(spack_cc),
'CXX_SERIAL={0}'.format(spack_cxx),
'CFLAGS_OMP={0}'.format(self.compiler.openmp_flag),
'FFLAGS_OMP={0}'.format(' '.join(fflags)),
'CPPFLAGS={0}'.format(' '.join(cppflags)),
'PIO={0}'.format(spec['parallelio'].prefix),
'NETCDF={0}'.format(spec['netcdf-c'].prefix),
'NETCDFF={0}'.format(spec['netcdf-fortran'].prefix)
]
if satisfies('^parallelio+pnetcdf'):
targets.append(
'PNETCDF={0}'.format(spec['parallel-netcdf'].prefix)
)
targets.extend([
'USE_PIO2=true', 'CPP_FLAGS=-D_MPI', 'OPENMP=true',
'CORE={0}'.format(model), action
])
return targets
def build(self, spec, prefix):
copy_tree(join_path('MPAS-Data', 'atmosphere'),
join_path('src', 'core_atmosphere', 'physics'))
make(*self.target('init_atmosphere', 'all'))
mkdir('bin')
copy('init_atmosphere_model', 'bin')
make(*self.target('init_atmosphere', 'clean'))
make(*self.target('atmosphere', 'all'))
copy('atmosphere_model', 'bin')
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
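# Usage sketch (editor's addition, not part of the recipe): once this package
# is visible to a Spack instance, the model is typically built with
#     spack install mpas-model
# optionally pinning the MPI provider, e.g. `spack install mpas-model ^openmpi`.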
|
LLNL/spack
|
var/spack/repos/builtin/packages/mpas-model/package.py
|
Python
|
lgpl-2.1
| 3,812
|
[
"NetCDF"
] |
ac213759f19920b570be53cec0ac92946d3ff7cb964175df8c1fa9d3fa653e1c
|
#
# special_block_parser_builder_test.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from antlr4 import *
from pynestml.meta_model.ast_nestml_compilation_unit import ASTNestMLCompilationUnit
from pynestml.meta_model.ast_source_location import ASTSourceLocation
from pynestml.generated.PyNestMLLexer import PyNestMLLexer
from pynestml.generated.PyNestMLParser import PyNestMLParser
from pynestml.symbol_table.symbol_table import SymbolTable
from pynestml.symbols.predefined_functions import PredefinedFunctions
from pynestml.symbols.predefined_types import PredefinedTypes
from pynestml.symbols.predefined_units import PredefinedUnits
from pynestml.symbols.predefined_variables import PredefinedVariables
from pynestml.utils.logger import LoggingLevel, Logger
from pynestml.visitors.ast_builder_visitor import ASTBuilderVisitor
# set up the infrastructure
PredefinedUnits.register_units()
PredefinedTypes.register_types()
PredefinedFunctions.register_functions()
PredefinedVariables.register_variables()
SymbolTable.initialize_symbol_table(ASTSourceLocation(start_line=0, start_column=0, end_line=0, end_column=0))
Logger.init_logger(LoggingLevel.NO)
class SpecialBlockParserBuilderTest(unittest.TestCase):
"""
    This test checks that the parsing of special blocks, i.e. for- and while-blocks, is executed as expected
    and that the corresponding AST is built correctly.
"""
def test(self):
# print('Start special block parsing and AST-building test...'),
input_file = FileStream(
os.path.join(os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), 'resources')),
'BlockTest.nestml')))
lexer = PyNestMLLexer(input_file)
# create a token stream
stream = CommonTokenStream(lexer)
stream.fill()
# parse the file
parser = PyNestMLParser(stream)
# print('done')
compilation_unit = parser.nestMLCompilationUnit()
ast_builder_visitor = ASTBuilderVisitor(stream.tokens)
ast = ast_builder_visitor.visit(compilation_unit)
# print('done')
self.assertTrue(isinstance(ast, ASTNestMLCompilationUnit))
if __name__ == '__main__':
unittest.main()
|
kperun/nestml
|
tests/special_block_parser_builder_test.py
|
Python
|
gpl-2.0
| 2,899
|
[
"VisIt"
] |
79790a4891fcb801a58d1919cc6510f249952ccb54fa1e66ba4d0a79833ea95f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
samStat
Created on Fri May 23 13:14:18 2014
@author: cjg
"""
import sys
import argparse
import pysam
import mapStat
def samStat_pysam(samFile, outputFile):
    ''' From the sam or bam file produced by a mapping, gather information about the reference sequences and the reads.
For reference sequences:
1. coverage percentage
2. coverage depth at each base pair
3. error rate/number (ins, del, sub)
4. number of reads mapped to it
For reads:
1. number of reported alignments that contains the query read
2. for each such alignment, what's the reference name and the qregion of the read
Input:
1. samFile: sam (bam) file name
2. outputFile: file for writing output
3. fileformat: either sam or bam, should do auto detect..
'''
mysam = pysam.Samfile(samFile,'r')
# total number of ref seqs
nRef = mysam.nreferences
sys.stdout.write(">> Number of reference sequences: {} \n".format(nRef))
# lengths of reference sequences
refLens = mysam.lengths
# dictionary including information about all the reference sequences and the reads
refSeq_dict = dict()
readSeq_dict = dict()
sys.stdout.write(">> go through each read \n")
count = 0
for read in mysam.fetch():
count += 1
rname = mysam.getrname(read.tid) # ref seq to which this read is mapped
qname = read.qname # name of the query sequence (read)
print qname, "\t", rname, "\t"
print read.cigarstring
# if this reference sequence is not in the dictionary, add it
if not refSeq_dict.has_key(rname):
refLen = refLens[read.tid] # length of the reference sequence
refSeq_dict[rname] = {'refLen':refLen, 'nReads':0, 'nReadsBp':0, 'nMatchBp':0,'nInsBp':0, 'nDelBp':0, 'nSubBp':0, 'coverage':[0]*refLen}
if not readSeq_dict.has_key(qname):
readSeq_dict[qname] = {'nMapping':0, 'mapInfo':list()}
## check CIGAR string
cigarstring = read.cigarstring # CIGAR string for this aligned read
cigarLens = mapStat.cigar(cigarstring)
## update the dictionary corresponding to the reference sequence
refSeq_dict[rname]['nReads'] += 1 # update number of mapped reads
readSeq_dict[qname]['nMapping'] += 1 # update number of mappings
refSeq_dict[rname]['nReadsBp'] += cigarLens['seq_len'] # update number of bps mapped to this ref seq
# update matching and substitution bps if possible
if cigarLens['match_len'] is not None:
refSeq_dict[rname]['nMatchBp'] += cigarLens['match_len']
if cigarLens['sub_len'] is not None:
refSeq_dict[rname]['nSubBp'] += cigarLens['sub_len']
refSeq_dict[rname]['nInsBp'] += cigarLens['ins_len'] # update number of insertion bps
refSeq_dict[rname]['nDelBp'] += cigarLens['del_len'] # update number of deletion bps
for apos in read.positions:
refSeq_dict[rname]['coverage'][apos] += 1
readSeq_dict[qname]['mapInfo'].append((read.qstart,read.qend, read.pos, read.aend, rname))
if count % 10000 == 0:
sys.stdout.write(' scanned {} records\n'.format(count))
mysam.close()
    ## get number of covered base pairs in the reference sequences
# sys.stdout.write(">> Get number of covered basepairs \n")
# for key in refSeq_dict:
# refSeq_dict[key]['nCovBp'] = len(refSeq_dict[key]['mappedPos'])
sys.stdout.write(">> Write statistics in output file \n")
myout1 = open(outputFile+".ref", 'w')
myout1.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format('refName', 'refLen','nReads', 'nReadsBp', 'nMatchBp','nInsBp','nDelBp','nSubBp','nCovBp','maxCov','avgCov'))
for key in refSeq_dict:
d = refSeq_dict[key]
myout1.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(key, d['refLen'],d['nReads'], d['nReadsBp'], d['nMatchBp'],d['nInsBp'],d['nDelBp'],d['nSubBp'],d['refLen'] - d['coverage'].count(0),max(d['coverage']),float(d['nReadsBp'])/float(d['refLen'])))
myout1.close()
myout2 = open(outputFile+".read", 'w')
myout2.write("{}\t{}\t{}\n".format('readName', 'nMappings','Mappings'))
for key in readSeq_dict:
d = readSeq_dict[key]
myout2.write("{}\t{}\t".format(key, d['nMapping']))
for map in d['mapInfo']:
myout2.write("({}, {} # {}, {} @ {})".format(map[0],map[1],map[2],map[3],map[4]))
myout2.write("\n")
myout2.close()
## =================================================================
## samStat function without using pysam, which is unstable sometimes
## =================================================================
def samStat(samFile, outputFile):
    ''' From the sam or bam file produced by a mapping, gather information about the reference sequences and the reads.
For reference sequences:
1. coverage percentage
2. coverage depth at each base pair
3. error rate/number (ins, del, sub)
4. number of reads mapped to it
For reads:
1. number of reported alignments that contains the query read
2. for each such alignment, what's the reference name and the qregion of the read
Input:
1. samFile: sam (bam) file name
2. outputFile: file for writing output
3. fileformat: either sam or bam, should do auto detect..
'''
nReferences = 0 # number of reference sequences
refLens = [] # list of reference length
refNames = []# list of reference names
count = 0 # number of aligned records in the sam file
# dictionaries for the reference sequences and the read sequences
refSeq_dict = dict()
readSeq_dict = dict()
sys.stdout.write(">> Scan sam file \n")
# start scanning sam file
with open(samFile,'r') as mysam:
for line in mysam:
if line[0] == '@': # header line
if line[1:3] == 'SQ': # reference sequence dictionary
nReferences += 1
rname = line[(line.find('SN:') + len('SN:')) : line.find('\t',line.find('SN:'))] # referenece sequence name
rLen = line[(line.find('LN:') + len('LN:')) : line.find('\t',line.find('LN:'))] # reference sequence length
refLens.append(int(rLen))
refNames.append(rname)
else: # non-header line
line = line.strip()
count += 1
read = mapStat.readAlign(line) # parse the alignment record
if read['cigarstring']=='*':
continue
rname = read['rname'] # ref seq to which this read is mapped
qname = read['qname'] # name of the query sequence (read)
#print qname, "\t", rname, "\t"
#print read['cigarstring']
# if this reference sequence is not in the dictionary, initiate it
if not refSeq_dict.has_key(rname):
refLen = refLens[refNames.index(rname)] # length of the reference sequence
refSeq_dict[rname] = {'refLen':refLen, 'nReads':0, 'nReadsBp':0, 'nMatchBp':0,'nInsBp':0, 'nDelBp':0, 'nSubBp':0, 'nEdit':0,'coverage':[0]*refLen}
if not readSeq_dict.has_key(qname):
readSeq_dict[qname] = {'nMapping':0, 'mapInfo':list()}
#print qname, '\t', rname, '\t', refLen
## check CIGAR string
cigarstring = read['cigarstring'] # CIGAR string for this aligned read
cigarLens = mapStat.cigar(cigarstring)
## update the dictionary corresponding to the reference sequence
refSeq_dict[rname]['nReads'] += 1 # update number of mapped reads
readSeq_dict[qname]['nMapping'] += 1 # update number of mappings
refSeq_dict[rname]['nReadsBp'] += cigarLens['seq_len'] # update number of bps mapped to this ref seq
# update matching and substitution bps if possible
if cigarLens['match_len'] is not None:
refSeq_dict[rname]['nMatchBp'] += cigarLens['match_len']
if cigarLens['sub_len'] is not None:
refSeq_dict[rname]['nSubBp'] += cigarLens['sub_len']
refSeq_dict[rname]['nInsBp'] += cigarLens['ins_len'] # update number of insertion bps
refSeq_dict[rname]['nDelBp'] += cigarLens['del_len'] # update number of deletion bps
# update edit distance
if read['NM'] is not None:
refSeq_dict[rname]['nEdit'] += 1
# update the coverage at the mapped positions
for apos in read['positions']:
refSeq_dict[rname]['coverage'][apos-1] += 1
# store the mapping information for this read:
# start and end positions for both the query read and the ref seq
# is this a secondary alignment?
# is this a reverse complement?
readSeq_dict[qname]['mapInfo'].append((read['qstart'],read['qend'], read['rstart'], read['rend'], read['is_secondary_alignment'], read['is_reversecomplement'],read['NM'],rname))
if count % 10000 == 0:
sys.stdout.write(' scanned {} records\n'.format(count))
    ## get number of covered base pairs in the reference sequences
# sys.stdout.write(">> Get number of covered basepairs \n")
# for key in refSeq_dict:
# refSeq_dict[key]['nCovBp'] = len(refSeq_dict[key]['mappedPos'])
sys.stdout.write(">> Write statistics in output file \n")
# print out statistics information for the reference sequences
myout1 = open(outputFile+".ref", 'w')
myout1.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format('refName', 'refLen','nReads', 'nReadsBp', 'nMatchBp','nInsBp','nDelBp','nSubBp','nEdit','nCovBp','maxCov','avgCov','coverage'))
for key in refSeq_dict:
d = refSeq_dict[key]
nCovBp = d['refLen'] - d['coverage'].count(0)
myout1.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(key, d['refLen'],d['nReads'], d['nReadsBp'], d['nMatchBp'],d['nInsBp'],d['nDelBp'],d['nSubBp'],d['nEdit'],nCovBp,max(d['coverage']),float(d['nReadsBp'])/float(d['refLen']),float(nCovBp)/float(d['refLen'])))
myout1.close()
# print out statistics information for the reads
myout2 = open(outputFile+".read", 'w')
myout2.write("{}\t{}\t{}\n".format('readName', 'nMappings','Mappings'))
for key in readSeq_dict:
d = readSeq_dict[key]
myout2.write("{}\t{}\t".format(key, d['nMapping']))
for thismap in d['mapInfo']:
# qstart, qend # rstart, rend # secondary # forward/backward @ edit distance, refName
myout2.write("({}, {} # {}, {} # {} # {} @ {}, {})".format(thismap[0],thismap[1],thismap[2],thismap[3],-1 if thismap[4] else 1, -1 if thismap[5] else 1,thismap[6],thismap[7]))
myout2.write("\n")
myout2.close()
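## =================================================================
## Illustrative sketch (not the actual mapStat implementation): one
## way the CIGAR length breakdown used above could be computed. The
## returned keys mirror what mapStat.cigar() is assumed to provide;
## the real helper may differ in details.
## =================================================================
def cigar_lengths_sketch(cigarstring):
    ''' Return per-operation base counts parsed from a CIGAR string. '''
    import re
    counts = {op: 0 for op in 'MIDNSHP=X'}
    for length, op in re.findall(r'(\d+)([MIDNSHP=X])', cigarstring):
        counts[op] += int(length)
    has_eq_x = counts['='] > 0 or counts['X'] > 0
    return {'seq_len': counts['M'] + counts['I'] + counts['='] + counts['X'],  # aligned query bases
            'match_len': counts['='] if has_eq_x else None,  # unknown when only M is used
            'sub_len': counts['X'] if has_eq_x else None,
            'ins_len': counts['I'],
            'del_len': counts['D']}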
## =================================================================
## argument parser
## =================================================================
parser = argparse.ArgumentParser(description="parse sam file and get summary statistics",
prog = 'samStat', #program name
prefix_chars='-', # prefix for options
fromfile_prefix_chars='@', # if options are read from file, '@args.txt'
conflict_handler='resolve', # for handling conflict options
add_help=True, # include help in the options
formatter_class=argparse.ArgumentDefaultsHelpFormatter # print default values for options in help message
)
## input files and directories
parser.add_argument("-i","--in",help="input sam file",dest='samFile',required=True)
## output directory
parser.add_argument("-o","--out",help="output statistics file",dest='outputFile',required=True)
## =================================================================
## main function
## =================================================================
def main(argv=None):
if argv is None:
args = parser.parse_args()
samStat(args.samFile,args.outputFile)
##==============================================================
## call from command line (instead of interactively)
##==============================================================
if __name__ == '__main__':
sys.exit(main())
|
chaij/pooPy
|
src/samStat.py
|
Python
|
mit
| 12,862
|
[
"pysam"
] |
a7e43805b3224d1f6753776af949a95bc9a1b7f28c26546a5c943eab5cad0e27
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .version import __version__
__doc__ = '''PySCeS ModelMap module: useful for exploring model component relations'''
class ModelMapBase(object):
name = None
def getName(self):
return self.name
def setName(self, name):
self.name = name
def get(self, attr):
"""Return an attribute whose name is str(attr)"""
try:
return getattr(self, attr)
except:
print("%s is not an attribute of this instance" % attr)
return None
class MapList(list):
def __init__(self, *args):
list.__init__(self, *args)
def asSet(self):
return set(self.__getslice__(0, self.__len__()))
class ModelMap(ModelMapBase):
__nDict__ = None
reactions = None
species = None
species_fixed = None
compartments = None
__model__ = None
__InitStrings__ = None
__InitDict__ = None
__not_inited__ = None
global_parameters = None
__parameter_store__ = None
def __init__(self, model):
self.setName(model.ModelFile[:-4])
self.__nDict__ = model.__nDict__
self.__model__ = model
self.__InitDict__ = self.__model__.__InitDict__.copy()
self.__compartments__ = self.__model__.__compartments__.copy()
for k in list(self.__InitDict__.keys()):
self.__InitDict__[k] = getattr(self.__model__, k)
self.global_parameters = []
self.__parameter_store__ = []
self.__not_inited__ = []
# operational shortcuts
self.addSpecies()
self.addReactions()
self.generateMappings()
self.addCompartments()
def __cleanString__(self, s):
s = s.lstrip()
s = s.rstrip()
return s
def addOneSpecies(self, species, fix=False):
s = Species(species)
s.setValue(self.__InitDict__[s.name])
if fix:
s.fixed = True
setattr(self, species, s)
self.species.append(s)
if fix:
self.species_fixed.append(s)
def addCompartments(self):
self.compartments = []
for c in self.__model__.__compartments__:
co = Compartment(
self.__model__.__compartments__[c]['name'],
self.__model__.__compartments__[c]['size'],
)
self.compartments.append(co)
setattr(self, c, co)
cname = [c.name for c in self.compartments]
for s in list(self.__model__.__sDict__.keys()):
if self.__model__.__sDict__[s]['compartment'] in cname:
getattr(self, self.__model__.__sDict__[s]['compartment']).setComponent(
getattr(self, s)
)
getattr(self, s).compartment = getattr(
self, self.__model__.__sDict__[s]['compartment']
)
for r in list(self.__model__.__nDict__.keys()):
if self.__model__.__nDict__[r]['compartment'] in cname:
getattr(self, self.__model__.__nDict__[r]['compartment']).setComponent(
getattr(self, r)
)
getattr(self, r).compartment = getattr(
self, self.__model__.__nDict__[r]['compartment']
)
def addOneReaction(self, reaction):
r = Reaction(reaction)
r.addFormula(self.__nDict__[r.name]['RateEq'].replace('self.', ''))
if self.__nDict__[r.name]['Type'] == 'Irrev':
r.reversible = False
fxnames = self.hasFixedSpecies()
for p in self.__nDict__[r.name]['Params']:
p = p.replace('self.', '')
if (
p not in self.hasGlobalParameters()
and p not in fxnames
and p not in self.__compartments__
):
if p in self.__InitDict__:
par = Parameter(p, self.__InitDict__[p])
else:
par = Parameter(p)
if p not in self.__not_inited__:
self.__not_inited__.append(p)
par.setAssociation(r)
self.global_parameters.append(par)
setattr(self, p, par)
r.addParameter(par)
elif p not in fxnames and p not in self.__compartments__:
pidx = self.hasGlobalParameters().index(p)
self.global_parameters[pidx].setAssociation(r)
r.addParameter(self.global_parameters[pidx])
setattr(self, reaction, r)
self.reactions.append(r)
def addSpecies(self):
self.species = []
self.species_fixed = []
for s in self.__model__.species:
self.addOneSpecies(s, fix=False)
for s in self.__model__.fixed_species:
self.addOneSpecies(s, fix=True)
def addReactions(self):
self.reactions = []
for r in self.__model__.reactions:
self.addOneReaction(r)
def generateMappings(self):
for reac in self.reactions:
for reag in self.__nDict__[reac.name]['Reagents']:
if self.__nDict__[reac.name]['Reagents'][reag] < 0.0:
reac.addSubstrate(getattr(self, reag.replace('self.', '')))
getattr(self, reag.replace('self.', '')).setSubstrate(
getattr(self, reac.name)
)
else:
reac.addProduct(getattr(self, reag.replace('self.', '')))
getattr(self, reag.replace('self.', '')).setProduct(
getattr(self, reac.name)
)
reac.stoichiometry.setdefault(
reag.replace('self.', ''),
self.__nDict__[reac.name]['Reagents'][reag],
)
for mod in self.__nDict__[reac.name]['Modifiers']:
reac.addModifier(getattr(self, mod.replace('self.', '')))
getattr(self, mod.replace('self.', '')).setModifier(
getattr(self, reac.name)
)
def hasReactions(self):
return MapList([r.name for r in self.reactions])
def hasSpecies(self):
return MapList([s.name for s in self.species])
def hasFixedSpecies(self):
return MapList([s.name for s in self.species_fixed])
def findReactionsThatIncludeAllSpecifiedReagents(self, *args):
assert len(args) > 1, '\nNeed two or more species for this one!'
setlist = [getattr(self, s).isReagentOf().asSet() for s in args]
isect = setlist[0]
for s in setlist:
isect.intersection_update(s)
return MapList(isect)
def hasGlobalParameters(self):
return MapList(p.name for p in self.global_parameters)
class Reaction(ModelMapBase):
modifiers = None
substrates = None
products = None
stoichiometry = None
parameters = None
reversible = True
formula = None
compartment = None
def __init__(self, name):
self.setName(name)
self.modifiers = []
self.substrates = []
self.products = []
self.stoichiometry = {}
self.parameters = []
def addSubstrate(self, species):
setattr(self, species.name, species)
self.substrates.append(species)
def addProduct(self, species):
setattr(self, species.name, species)
self.products.append(species)
def addModifier(self, species):
setattr(self, species.name, species)
self.modifiers.append(species)
def addFormula(self, formula):
self.formula = formula
def addParameter(self, par):
setattr(self, par.name, par)
self.parameters.append(par)
def hasProducts(self, t=type):
return MapList([p.name for p in self.products])
def hasSubstrates(self):
return MapList([s.name for s in self.substrates])
def hasModifiers(self):
return MapList([m.name for m in self.modifiers])
def hasParameters(self):
return MapList([p.name for p in self.parameters])
def hasReagents(self):
return MapList(self.hasSubstrates() + self.hasProducts())
class NumberBase(ModelMapBase):
value = None
def __call__(self):
return self.value
def getValue(self):
return self.value
def setValue(self, v):
self.value = v
class Species(NumberBase):
subs = None
prods = None
mods = None
fixed = False
compartment = None
def __init__(self, name):
self.setName(name)
self.subs = []
self.prods = []
self.mods = []
def setSubstrate(self, reaction):
setattr(self, reaction.name, reaction)
self.subs.append(reaction)
def setProduct(self, reaction):
setattr(self, reaction.name, reaction)
self.prods.append(reaction)
def setModifier(self, reaction):
setattr(self, reaction.name, reaction)
self.mods.append(reaction)
def isSubstrateOf(self):
return MapList([r.name for r in self.subs])
def isProductOf(self):
return MapList([r.name for r in self.prods])
def isModifierOf(self):
return MapList([r.name for r in self.mods])
def isReagentOf(self):
return MapList(self.isSubstrateOf() + self.isProductOf())
class Parameter(NumberBase):
association = None
formula = None
def __init__(self, name, value=None):
self.name = name
self.value = value
self.association = []
def setAssociation(self, reac):
self.association.append(reac)
setattr(self, reac.name, reac)
def isParameterOf(self):
return MapList([a.name for a in self.association])
def setFormula(self, formula):
self.formula = formula
class Compartment(NumberBase):
components = None
def __init__(self, name, value=None):
self.name = name
self.value = value
self.components = []
def setComponent(self, comp):
self.components.append(comp)
setattr(self, comp.name, comp)
def hasComponents(self):
return MapList([a.name for a in self.components])
if __name__ == '__main__':
import pysces
M = pysces.model('pysces_model_linear1')
M.doLoad()
print('\nModel', M.ModelFile)
print('=============')
modmap = ModelMap(M)
print('Reactions\n', modmap.hasReactions())
print('Species\n', modmap.hasSpecies())
print('FixedSpecies\n', modmap.hasFixedSpecies())
print(' ')
print('R1 has reagents\n', modmap.R1.hasReagents())
print('R1 has sub\n', modmap.R1.hasSubstrates())
print('R1 has prod\n', modmap.R1.hasProducts())
print('R1 has mod\n', modmap.R1.hasModifiers())
print(' ')
print('s2 is reagent\n', modmap.s2.isReagentOf())
print('s2 is sub\n', modmap.s2.isSubstrateOf())
print('s2 is prod\n', modmap.s2.isProductOf())
print('s2 is mod\n', modmap.s2.isModifierOf())
print(' ')
print('R2 stoich\n', modmap.R2.stoichiometry)
print(' ')
print(
'findReactionsThatIncludeAllSpecifiedReagents(A, B):',
modmap.findReactionsThatIncludeAllSpecifiedReagents('s1', 's2'),
)
print('\nmodmap.hasGlobalParameters\n', modmap.hasGlobalParameters())
print('\nParameter associations')
for p in modmap.global_parameters:
print('%s.isParameterOf() %s' % (p.name, p.isParameterOf()))
|
bgoli/pysces
|
pysces/PyscesModelMap.py
|
Python
|
bsd-3-clause
| 12,084
|
[
"PySCeS"
] |
3d5be5deae7e420f8a86ddc590c778af21c486baade0bea2fc860ba6a2ad4dca
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = np.full(land_mask.shape[0], -9999, dtype='int')
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9998], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
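# Illustrative follow-up, not part of the original example: the fitted
# KernelDensity object (here the estimate for the last species plotted) can be
# queried at arbitrary points. The haversine metric expects (latitude,
# longitude) in radians, and score_samples() returns log-densities.
query_deg = np.array([[-10.0, -60.0], [0.0, -78.0]])  # illustrative (lat, long) in degrees
query_rad = query_deg * np.pi / 180.
print(np.exp(kde.score_samples(query_rad)))  # densities at the query points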
|
kevin-intel/scikit-learn
|
examples/neighbors/plot_species_kde.py
|
Python
|
bsd-3-clause
| 4,755
|
[
"Gaussian"
] |
675dc9c84daf15b0f7b9210c3b8daa69c4b928944b56cd59de76baee2b92071d
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see winpython/__init__.py for details)
"""
WinPython Package Manager
Created on Fri Aug 03 14:32:26 2012
"""
from __future__ import print_function
import os
import os.path as osp
import shutil
import re
import sys
import subprocess
# Local imports
from winpython import utils
from winpython.config import DATA_PATH
from winpython.py3compat import configparser as cp
# from former wppm separate script launcher
from argparse import ArgumentParser
from winpython import py3compat
# Workaround for installing PyVISA on Windows from source:
os.environ['HOME'] = os.environ['USERPROFILE']
def get_package_metadata(database, name):
"""Extract infos (description, url) from the local database"""
# Note: we could use the PyPI database but this has been written on
    # a machine which is not connected to the internet
db = cp.ConfigParser()
db.readfp(open(osp.join(DATA_PATH, database)))
metadata = dict(description='', url='http://pypi.python.org/pypi/' + name)
for key in metadata:
name1 = name.lower()
# wheel replace '-' per '_' in key
for name2 in (name1, name1.split('-')[0], name1.replace('-', '_'),
'-'.join(name1.split('_'))):
try:
metadata[key] = db.get(name2, key)
break
except (cp.NoSectionError, cp.NoOptionError):
pass
return metadata
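# For illustration, following the fallback loop above: for name 'scikit-learn'
# the sections tried are, in order, 'scikit-learn', 'scikit', 'scikit_learn'
# and 'scikit-learn' again; the first section found wins, otherwise the
# defaults (empty description, generic PyPI URL) are returned.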
class BasePackage(object):
def __init__(self, fname):
self.fname = fname
self.name = None
self.version = None
self.architecture = None
self.pyversion = None
self.description = None
self.url = None
def __str__(self):
text = "%s %s" % (self.name, self.version)
pytext = ""
if self.pyversion is not None:
pytext = " for Python %s" % self.pyversion
if self.architecture is not None:
if not pytext:
pytext = " for Python"
pytext += " %dbits" % self.architecture
text += "%s\n%s\nWebsite: %s\n[%s]" % (pytext, self.description,
self.url,
osp.basename(self.fname))
return text
def is_compatible_with(self, distribution):
"""Return True if package is compatible with distribution in terms of
architecture and Python version (if applyable)"""
iscomp = True
if self.architecture is not None:
# Source distributions (not yet supported though)
iscomp = iscomp and self.architecture == distribution.architecture
if self.pyversion is not None:
# Non-pure Python package
iscomp = iscomp and self.pyversion == distribution.version
return iscomp
def extract_optional_infos(self):
"""Extract package optional infos (description, url)
from the package database"""
metadata = get_package_metadata('packages.ini', self.name)
for key, value in list(metadata.items()):
setattr(self, key, value)
class Package(BasePackage):
def __init__(self, fname):
BasePackage.__init__(self, fname)
self.files = []
self.extract_infos()
self.extract_optional_infos()
def extract_infos(self):
"""Extract package infos (name, version, architecture)
from filename (installer basename)"""
bname = osp.basename(self.fname)
if bname.endswith('.exe'):
# distutils bdist_wininst
match = re.match(utils.WININST_PATTERN, bname)
if match is not None:
(self.name, self.version,
_t0, _qtver, arch, _t1, self.pyversion, _t2) = match.groups()
self.architecture = 32 if arch == 'win32' else 64
return
# NSIS
pat = r'([a-zA-Z0-9\-\_]*)-Py([0-9\.]*)-x(64|32)-gpl-([0-9\.\-]*[a-z]*)\.exe'
match = re.match(pat, bname)
if match is not None:
self.name, self.pyversion, arch, self.version = match.groups()
self.architecture = int(arch)
return
# NSIS complement to match PyQt4-4.10.4-gpl-Py3.4-Qt4.8.6-x32.exe
pat = r'([a-zA-Z0-9\_]*)-([0-9\.]*[a-z]*)-gpl-Py([0-9\.]*)-.*-x(64|32)\.exe'
match = re.match(pat, bname)
if match is not None:
self.name, self.version, self.pyversion, arch = match.groups()
self.architecture = int(arch)
return
match = re.match(r'([a-zA-Z0-9\-\_]*)-([0-9\.]*[a-z]*)-py([0-9\.]*)-x(64|32)-([a-z0-9\.\-]*).exe', bname)
if match is not None:
self.name, self.version, self.pyversion, arch, _pyqt = match.groups()
self.architecture = int(arch)
return
# New : Binary wheel case
elif bname.endswith(('32.whl', '64.whl')):
match = re.match(utils.WHEELBIN_PATTERN, bname)
# typical match is ('scipy', '0.14.1rc1', '34', 'win32')
if match is not None:
self.name, self.version, self.pywheel , arch = match.groups()
# self.pywheel version is '34' not 3.4
self.pyversion = self.pywheel[:1] + '.' + self.pywheel[1:]
# wheel arch is 'win32' or 'win_amd64'
self.architecture = 32 if arch == 'win32' else 64
return
elif bname.endswith(('.zip', '.tar.gz', '.whl')):
# distutils sdist
infos = utils.get_source_package_infos(bname)
if infos is not None:
self.name, self.version = infos
return
raise NotImplementedError("Not supported package type %s" % bname)
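    # Examples of installer basenames handled by extract_infos above
    # (illustrative only; the wheel name is inferred from the typical match
    # noted in the comments):
    #   bdist_wininst exe : scipy-0.10.1.win-amd64-py2.7.exe
    #   NSIS exe          : PyQt4-4.10.4-gpl-Py3.4-Qt4.8.6-x32.exe
    #   binary wheel      : scipy-0.14.1rc1-cp34-none-win32.whl
    #   sdist             : docutils-0.9.1.tar.gz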
def logpath(self, logdir):
"""Return full log path"""
return osp.join(logdir, osp.basename(self.fname+'.log'))
def save_log(self, logdir):
"""Save log (pickle)"""
header = ['# WPPM package installation log',
'# ',
'# Package: %s v%s' % (self.name, self.version),
'']
open(self.logpath(logdir), 'w').write('\n'.join(header + self.files))
def load_log(self, logdir):
"""Load log (pickle)"""
try:
data = open(self.logpath(logdir), 'U').readlines()
except (IOError, OSError):
data = [] # it can be now ()
self.files = []
for line in data:
relpath = line.strip()
if relpath.startswith('#') or len(relpath) == 0:
continue
self.files.append(relpath)
def remove_log(self, logdir):
"""Remove log (after uninstalling package)"""
try:
os.remove(self.logpath(logdir))
except WindowsError:
pass
class WininstPackage(BasePackage):
def __init__(self, fname, distribution):
BasePackage.__init__(self, fname)
self.logname = None
self.distribution = distribution
self.architecture = distribution.architecture
self.pyversion = distribution.version
self.extract_infos()
self.extract_optional_infos()
def extract_infos(self):
"""Extract package infos (name, version, architecture)"""
match = re.match(r'Remove([a-zA-Z0-9\-\_\.]*)\.exe', self.fname)
if match is None:
return
self.name = match.groups()[0]
self.logname = '%s-wininst.log' % self.name
fd = open(osp.join(self.distribution.target, self.logname), 'U')
searchtxt = 'DisplayName='
for line in fd.readlines():
pos = line.find(searchtxt)
if pos != -1:
break
else:
return
fd.close()
match = re.match(r'Python %s %s-([0-9\.]*)'
% (self.pyversion, self.name),
line[pos+len(searchtxt):])
if match is None:
return
self.version = match.groups()[0]
def uninstall(self):
"""Uninstall package"""
subprocess.call([self.fname, '-u', self.logname],
cwd=self.distribution.target)
class Distribution(object):
# PyQt module is now like :PyQt4-...
NSIS_PACKAGES = ('PyQt4', 'PyQwt', 'PyQt5') # known NSIS packages
def __init__(self, target=None, verbose=False, indent=False):
self.target = target
self.verbose = verbose
self.indent = indent
self.logdir = None
# if no target path given, take the current python interpreter one
if self.target is None:
self.target = os.path.dirname(sys.executable)
self.init_log_dir()
self.to_be_removed = [] # list of directories to be removed later
self.version, self.architecture = utils.get_python_infos(target)
def clean_up(self):
"""Remove directories which couldn't be removed when building"""
for path in self.to_be_removed:
try:
shutil.rmtree(path, onerror=utils.onerror)
except WindowsError:
print("Directory %s could not be removed" % path,
file=sys.stderr)
def remove_directory(self, path):
"""Try to remove directory -- on WindowsError, remove it later"""
try:
shutil.rmtree(path)
except WindowsError:
self.to_be_removed.append(path)
def init_log_dir(self):
"""Init log path"""
path = osp.join(self.target, 'Logs')
if not osp.exists(path):
os.mkdir(path)
self.logdir = path
def copy_files(self, package, targetdir,
srcdir, dstdir, create_bat_files=False):
"""Add copy task"""
srcdir = osp.join(targetdir, srcdir)
if not osp.isdir(srcdir):
return
offset = len(srcdir)+len(os.pathsep)
for dirpath, dirnames, filenames in os.walk(srcdir):
for dname in dirnames:
t_dname = osp.join(dirpath, dname)[offset:]
src = osp.join(srcdir, t_dname)
dst = osp.join(dstdir, t_dname)
if self.verbose:
print("mkdir: %s" % dst)
full_dst = osp.join(self.target, dst)
if not osp.exists(full_dst):
os.mkdir(full_dst)
package.files.append(dst)
for fname in filenames:
t_fname = osp.join(dirpath, fname)[offset:]
src = osp.join(srcdir, t_fname)
if dirpath.endswith('_system32'):
# Files that should be copied in %WINDIR%\system32
dst = fname
else:
dst = osp.join(dstdir, t_fname)
if self.verbose:
print("file: %s" % dst)
full_dst = osp.join(self.target, dst)
shutil.move(src, full_dst)
package.files.append(dst)
name, ext = osp.splitext(dst)
if create_bat_files and ext in ('', '.py'):
dst = name + '.bat'
if self.verbose:
print("file: %s" % dst)
full_dst = osp.join(self.target, dst)
fd = open(full_dst, 'w')
fd.write("""@echo off
python "%~dpn0""" + ext + """" %*""")
fd.close()
package.files.append(dst)
def create_file(self, package, name, dstdir, contents):
"""Generate data file -- path is relative to distribution root dir"""
dst = osp.join(dstdir, name)
if self.verbose:
print("create: %s" % dst)
full_dst = osp.join(self.target, dst)
open(full_dst, 'w').write(contents)
package.files.append(dst)
def get_installed_packages(self):
"""Return installed packages"""
# Packages installed with WPPM
wppm = [Package(logname[:-4]) for logname in os.listdir(self.logdir)
if '.whl.log' not in logname ]
# Packages installed with distutils wininst
wininst = []
for name in os.listdir(self.target):
if name.startswith('Remove') and name.endswith('.exe'):
try:
pack = WininstPackage(name, self)
except IOError:
continue
if pack.name is not None and pack.version is not None:
wininst.append(pack)
# Include package installed via pip (not via WPPM)
try:
if os.path.dirname(sys.executable) == self.target:
# direct way: we interrogate ourself
import imp, pip
pip.utils.pkg_resources = imp.reload(pip.utils.pkg_resources)
pip_list = [(i.key, i.version)
for i in pip.get_installed_distributions()]
else:
# indirect way: we interrogate something else
cmdx=[osp.join(self.target, 'python.exe'), '-c',
"import pip;print('+!+'.join(['%s@+@%s@+@' % (i.key,i.version) for i in pip.get_installed_distributions()]))"]
p = subprocess.Popen(cmdx, shell=True, stdout=subprocess.PIPE,
cwd=self.target)
stdout, stderr = p.communicate()
start_at = 2 if sys.version_info >= (3,0) else 0
pip_list = [line.split("@+@")[:2] for line in
("%s" % stdout)[start_at:].split("+!+")]
# create pip package list
wppip = [Package('%s-%s-py2.py3-none-any.whl' %
(i[0].lower(), i[1])) for i in pip_list]
# pip package version is supposed better
already = set(b.name.replace('-', '_') for b in wppip+wininst)
wppm = wppip + [i for i in wppm
if i.name.replace('-', '_').lower() not in already]
except:
pass
return sorted(wppm + wininst, key=lambda tup: tup.name.lower())
def find_package(self, name):
"""Find installed package"""
for pack in self.get_installed_packages():
if pack.name == name:
return pack
def uninstall_existing(self, package):
"""Uninstall existing package (or package name)"""
if isinstance(package ,str):
pack = self.find_package(package)
else:
pack = self.find_package(package.name)
if pack is not None:
self.uninstall(pack)
def patch_all_shebang(self, to_movable=True, max_exe_size=999999):
"""make all python launchers relatives"""
import glob
import os
for ffname in glob.glob(r'%s\Scripts\*.exe' % self.target):
size = os.path.getsize(ffname)
if size <= max_exe_size:
utils.patch_shebang_line(ffname, to_movable=to_movable)
def install(self, package, install_options=None):
"""Install package in distribution"""
assert package.is_compatible_with(self)
tmp_fname = None
# wheel addition
if package.fname.endswith(('.whl', '.tar.gz', '.zip')):
self.install_bdist_direct(package, install_options=install_options)
bname = osp.basename(package.fname)
if bname.endswith('.exe'):
if re.match(r'(' + ('|'.join(self.NSIS_PACKAGES)) + r')-', bname):
self.install_nsis_package(package)
else:
self.install_bdist_wininst(package)
elif bname.endswith('.msi'):
self.install_bdist_msi(package)
self.handle_specific_packages(package)
# minimal post-install actions
self.patch_standard_packages(package.name)
if not package.fname.endswith(('.whl', '.tar.gz', '.zip')):
package.save_log(self.logdir)
if tmp_fname is not None:
os.remove(tmp_fname)
def do_pip_action(self, actions=None, install_options=None):
"""Do pip action in a distribution"""
my_list = install_options
if my_list is None:
my_list = []
my_actions = actions
if my_actions is None:
my_actions = []
executing = osp.join(self.target, '..', 'scripts', 'env.bat')
if osp.isfile(executing):
complement = [r'&&' , 'cd' , '/D', self.target,
r'&&', osp.join(self.target, 'python.exe') ]
complement += [ '-m', 'pip']
else:
executing = osp.join(self.target, 'python.exe')
complement = [ '-m', 'pip']
try:
fname = utils.do_script(this_script=None,
python_exe=executing,
architecture=self.architecture, verbose=self.verbose,
install_options=complement + my_actions + my_list)
except RuntimeError:
if not self.verbose:
print("Failed!")
raise
def patch_standard_packages(self, package_name=''):
"""patch Winpython packages in need"""
import filecmp
# 'pywin32' minimal post-install (pywin32_postinstall.py do too much)
if package_name.lower() == "pywin32" or package_name == '':
origin = self.target + (r"\Lib\site-packages\pywin32_system32")
destin = self.target
if osp.isdir(origin):
for name in os.listdir(origin):
here, there = osp.join(origin, name), osp.join(destin, name)
if (not os.path.exists(there) or
not filecmp.cmp(here, there)):
shutil.copyfile(here, there)
# 'pip' to do movable launchers (around line 100) !!!!
# rational: https://github.com/pypa/pip/issues/2328
if package_name.lower() == "pip" or package_name == '':
# ensure pip will create movable launchers
utils.patch_sourcefile(
self.target + (
r"\Lib\site-packages\pip\_vendor\distlib\scripts.py"),
" executable = get_executable()",
" executable = os.path.join(os.path.basename(get_executable()))")
# create movable launchers for previous package installations
self.patch_all_shebang()
if package_name.lower() == "spyder" or package_name == '':
            # do not let Spyder go online to check for updates unless asked
utils.patch_sourcefile(
self.target + (
r"\Lib\site-packages\spyderlib\config\main.py"),
"'check_updates_on_startup': True,",
"'check_updates_on_startup': False,")
# workaround bad installers
if package_name.lower() == "theano" or package_name == '':
self.create_pybat(['theano-cache', 'theano-nose', 'theano-test'])
if package_name.lower() == "numba" or package_name == '':
self.create_pybat(['numba', 'pycc'])
for checklist in("odo", "vitables", "cxfreeze"):
if package_name.lower() == checklist or package_name == '':
self.create_pybat(checklist)
def create_pybat(self, names, contents="""@echo off
python "%~dpn0" %*"""):
"""Create launcher batch script when missing"""
scriptpy = osp.join(self.target, 'Scripts') # std Scripts of python
my_list = names if list(names) == names else [names]
for name in my_list:
if osp.isdir(scriptpy) and osp.isfile(osp.join(scriptpy, name)):
if (not osp.isfile(osp.join(scriptpy, name + '.exe')) and
not osp.isfile(osp.join(scriptpy, name + '.bat'))):
fd = open(osp.join(scriptpy, name + '.bat'), 'w')
fd.write(contents)
fd.close()
def handle_specific_packages(self, package):
"""Packages requiring additional configuration"""
if package.name.lower() in ('pyqt4', 'pyqt5'):
# Qt configuration file (where to find Qt)
name = 'qt.conf'
contents = """[Paths]
Prefix = .
Binaries = ."""
self.create_file(package, name,
osp.join('Lib', 'site-packages', package.name),
contents)
self.create_file(package, name, '.',
contents.replace('.', './Lib/site-packages/%s' % package.name))
# pyuic script
if package.name.lower() == 'pyqt5':
# see http://code.activestate.com/lists/python-list/666469/
tmp_string = r'''@echo off
if "%WINPYDIR%"=="" call %~dp0..\..\scripts\env.bat
python -m PyQt5.uic.pyuic %1 %2 %3 %4 %5 %6 %7 %8 %9'''
else:
tmp_string = r'''@echo off
if "%WINPYDIR%"=="" call %~dp0..\..\scripts\env.bat
python "%WINPYDIR%\Lib\site-packages\package.name\uic\pyuic.py" %1 %2 %3 %4 %5 %6 %7 %8 %9'''
self.create_file(package, 'pyuic%s.bat' % package.name[-1],
'Scripts', tmp_string.replace('package.name', package.name))
# Adding missing __init__.py files (fixes Issue 8)
uic_path = osp.join('Lib', 'site-packages', package.name, 'uic')
for dirname in ('Loader', 'port_v2', 'port_v3'):
self.create_file(package, '__init__.py',
osp.join(uic_path, dirname), '')
def _print(self, package, action):
"""Print package-related action text (e.g. 'Installing')
indicating progress"""
text = " ".join([action, package.name, package.version])
if self.verbose:
utils.print_box(text)
else:
if self.indent:
text = (' '*4) + text
print(text + '...', end=" ")
def _print_done(self):
"""Print OK at the end of a process"""
if not self.verbose:
print("OK")
def uninstall(self, package):
"""Uninstall package from distribution"""
self._print(package, "Uninstalling")
if isinstance(package, WininstPackage):
package.uninstall()
package.remove_log(self.logdir)
elif not package.name == 'pip':
# trick to get true target (if not current)
this_executable_path = os.path.dirname(self.logdir)
subprocess.call([this_executable_path + r'\python.exe',
'-m', 'pip', 'uninstall', package.name, '-y'],
cwd=this_executable_path)
# legacy, if some package installed by old non-pip means
package.load_log(self.logdir)
for fname in reversed(package.files):
path = osp.join(self.target, fname)
if osp.isfile(path):
if self.verbose:
print("remove: %s" % fname)
os.remove(path)
if fname.endswith('.py'):
for suffix in ('c', 'o'):
if osp.exists(path+suffix):
if self.verbose:
print("remove: %s" % (fname+suffix))
os.remove(path+suffix)
elif osp.isdir(path):
if self.verbose:
print("rmdir: %s" % fname)
pycache = osp.join(path, '__pycache__')
if osp.exists(pycache):
try:
shutil.rmtree(pycache, onerror=utils.onerror)
if self.verbose:
print("rmtree: %s" % pycache)
except WindowsError:
print("Directory %s could not be removed"
% pycache, file=sys.stderr)
try:
os.rmdir(path)
except OSError:
if self.verbose:
print("unable to remove directory: %s" % fname,
file=sys.stderr)
else:
if self.verbose:
print("file not found: %s" % fname, file=sys.stderr)
package.remove_log(self.logdir)
self._print_done()
def install_bdist_wininst(self, package):
"""Install a distutils package built with the bdist_wininst option
(binary distribution, .exe file)"""
self._print(package, "Extracting")
targetdir = utils.extract_archive(package.fname)
self._print_done()
self._print(package, "Installing %s from " % targetdir)
self.copy_files(package, targetdir, 'PURELIB',
osp.join('Lib', 'site-packages'))
self.copy_files(package, targetdir, 'PLATLIB',
osp.join('Lib', 'site-packages'))
self.copy_files(package, targetdir, 'SCRIPTS', 'Scripts',
create_bat_files=True)
self.copy_files(package, targetdir, 'DLLs', 'DLLs')
self.copy_files(package, targetdir, 'DATA', '.')
self._print_done()
def install_bdist_direct(self, package, install_options=None):
"""Install a package directly !"""
self._print(package, "Installing %s" % package.fname.split(".")[-1])
# targetdir = utils.extract_msi(package.fname, targetdir=self.target)
try:
fname = utils.direct_pip_install(package.fname,
python_exe=osp.join(self.target, 'python.exe'),
architecture=self.architecture, verbose=self.verbose,
install_options=install_options)
except RuntimeError:
if not self.verbose:
print("Failed!")
raise
package = Package(fname)
self._print_done()
def install_script(self, script, install_options=None):
try:
fname = utils.do_script(script,
python_exe=osp.join(self.target, 'python.exe'),
architecture=self.architecture, verbose=self.verbose,
install_options=install_options)
except RuntimeError:
if not self.verbose:
print("Failed!")
raise
def install_bdist_msi(self, package):
"""Install a distutils package built with the bdist_msi option
(binary distribution, .msi file)"""
raise NotImplementedError
# self._print(package, "Extracting")
# targetdir = utils.extract_msi(package.fname, targetdir=self.target)
# self._print_done()
def install_nsis_package(self, package):
"""Install a Python package built with NSIS (e.g. PyQt or PyQwt)
(binary distribution, .exe file)"""
bname = osp.basename(package.fname)
assert bname.startswith(self.NSIS_PACKAGES)
self._print(package, "Extracting")
targetdir = utils.extract_exe(package.fname)
self._print_done()
self._print(package, "Installing")
self.copy_files(package, targetdir, 'Lib', 'Lib')
if bname.startswith('PyQt5'):
# PyQt5
outdir = osp.join('Lib', 'site-packages', 'PyQt5')
elif bname.startswith('PyQt'):
# PyQt4
outdir = osp.join('Lib', 'site-packages', 'PyQt4')
else:
# Qwt5
outdir = osp.join('Lib', 'site-packages', 'PyQt4', 'Qwt5')
self.copy_files(package, targetdir, '$_OUTDIR', outdir)
self._print_done()
def main(test=False):
if test:
sbdir = osp.join(osp.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'sandbox')
tmpdir = osp.join(sbdir, 'tobedeleted')
# fname = osp.join(tmpdir, 'scipy-0.10.1.win-amd64-py2.7.exe')
fname = osp.join(sbdir, 'VTK-5.10.0-Qt-4.7.4.win32-py2.7.exe')
print(Package(fname))
sys.exit()
target = osp.join(utils.BASE_DIR, 'build',
'winpython-2.7.3', 'python-2.7.3')
fname = osp.join(utils.BASE_DIR, 'packages.src', 'docutils-0.9.1.tar.gz')
dist = Distribution(target, verbose=True)
pack = Package(fname)
print(pack.description)
# dist.install(pack)
# dist.uninstall(pack)
else:
parser = ArgumentParser(description="WinPython Package Manager: install, "\
"uninstall or upgrade Python packages on a Windows "\
"Python distribution like WinPython.")
parser.add_argument('fname', metavar='package',
type=str if py3compat.PY3 else unicode,
help='path to a Python package')
parser.add_argument('-t', '--target', dest='target', default=sys.prefix,
help='path to target Python distribution '\
'(default: "%s")' % sys.prefix)
parser.add_argument('-i', '--install', dest='install',
action='store_const', const=True, default=False,
help='install package (this is the default action)')
parser.add_argument('-u', '--uninstall', dest='uninstall',
action='store_const', const=True, default=False,
help='uninstall package')
args = parser.parse_args()
if args.install and args.uninstall:
raise RuntimeError("Incompatible arguments: --install and --uninstall")
if not args.install and not args.uninstall:
args.install = True
if not osp.isfile(args.fname):
raise IOError("File not found: %s" % args.fname)
if utils.is_python_distribution(args.target):
dist = Distribution(args.target)
try:
package = Package(args.fname)
if package.is_compatible_with(dist):
if args.install:
dist.install(package)
else:
dist.uninstall(package)
else:
raise RuntimeError("Package is not compatible with Python "\
"%s %dbit" % (dist.version, dist.architecture))
except NotImplementedError:
raise RuntimeError("Package is not (yet) supported by WPPM")
else:
raise WindowsError("Invalid Python distribution %s" % args.target)
if __name__ == '__main__':
main()
|
sharhar/USB-Thing
|
UpdaterFiles/Lib/python-3.5.1.amd64/Lib/site-packages/winpython/wppm.py
|
Python
|
apache-2.0
| 30,724
|
[
"VTK"
] |
d8a6113d174a7aa25c8fc057cca0a5000a09df46202af8a70c6f55b1a269e506
|
"""Use PyMOl to create templates for bomeba"""
import numpy as np
np.set_printoptions(precision=3)
import __main__
__main__.pymol_argv = ['pymol','-qck']
import pymol
from pymol import cmd, stored
pymol.finish_launching()
import openbabel as ob
def minimize(selection='all', forcefield='MMFF94s', method='cg',
nsteps= 2000, conv=1E-6, cutoff=False, cut_vdw=6.0, cut_elec=8.0):
pdb_string = cmd.get_pdbstr(selection)
name = cmd.get_legal_name(selection)
obconversion = ob.OBConversion()
obconversion.SetInAndOutFormats('pdb', 'pdb')
mol = ob.OBMol()
obconversion.ReadString(mol, pdb_string)
ff = ob.OBForceField.FindForceField(forcefield)
ff.Setup(mol)
if cutoff == True:
ff.EnableCutOff(True)
ff.SetVDWCutOff(cut_vdw)
ff.SetElectrostaticCutOff(cut_elec)
if method == 'cg':
ff.ConjugateGradients(nsteps, conv)
else:
ff.SteepestDescent(nsteps, conv)
ff.GetCoordinates(mol)
nrg = ff.Energy()
pdb_string = obconversion.WriteString(mol)
cmd.delete(name)
if name == 'all':
name = 'all_'
cmd.read_pdbstr(pdb_string, name)
return nrg
# The original snippet references `aa` and `sel` without defining them; the
# definitions below are assumed: the 20 standard one-letter amino-acid codes
# and a selection covering the freshly built dipeptide.
aa = 'ACDEFGHIKLMNPQRSTVWY'
sel = 'all'
pbl = []
for res_name_i in aa:
for res_name_j in aa:
cmd.fab(res_name_i + res_name_j)
minimize(selection=sel, forcefield='GAFF')
a = cmd.get_distance('resi 1 and name C', 'resi 2 and name N')
pbl.append(a)
cmd.delete('all')
mean = sum(pbl) / len(pbl)
print(mean)
|
BIOS-IMASL/bomeba0
|
bomeba0/scaffold/compute_peptide_bond_lenght.py
|
Python
|
apache-2.0
| 1,471
|
[
"PyMOL"
] |
725abfb4f70b904ae3aac828c6835b89710f09b2f5d0f218fb38e838d959fa07
|
"""
@name: PyHouse/src/Modules/Computer/Web/_test/test_web_login.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com>
@copyright: (c) 2014-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 29, 2014
@Summary:
Passed all 8 tests - DBK - 2017-01-11
"""
__updated__ = '2020-02-14'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files and modules.
from test.xml_data import XML_LONG, TESTING_PYHOUSE
from test.testing_mixin import SetupPyHouseObj
from Modules.Computer.Nodes.nodes_xml import Xml as nodesXml
from Modules.Computer.Web import web_login
from Modules.Computer.Web.test.xml_web import TESTING_LOGIN_NAME_0, TESTING_WEB_PORT
from Modules.Computer.Web.web import WorkspaceData
from Modules.Core.Utilities import json_tools
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
def setUp(self, p_root):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
self.m_xml = SetupPyHouseObj().BuildXml(p_root)
class A0(unittest.TestCase):
def setUp(self):
pass
def test_00_Print(self):
print('Id: test_web_login')
class A1_Setup(SetupMixin, unittest.TestCase):
"""
This section tests the above setup for things we will need further down in the tests.
"""
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_LONG))
def test_01_Tags(self):
""" Be sure that the XML contains the right stuff.
"""
# print(PrettyFormatAny.form(self.m_xml, 'A1-01-A - Tags'))
self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')
self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')
self.assertEqual(self.m_xml.web_sect.tag, 'WebSection')
class A2_XML(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_LONG))
def test_01_Port(self):
""" Be sure that the XML contains the right stuff.
"""
l_xml = self.m_xml.web_sect
# print(PrettyFormatAny.form(l_xml, 'A2-01-A - XML'))
self.assertEqual(l_xml.find('Port').text, TESTING_WEB_PORT)
def test_02_FindXml(self):
""" Be sure that the XML contains the right stuff.
"""
# print(PrettyFormatAny.form(self.m_xml.web_sect, 'A2-02-A - Web Xml'))
def test_03_ReadXML(self):
l_web = webXml.read_web_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.Computer.Web = l_web
# print(PrettyFormatAny.form(l_web, 'A2-03-A - Web Data'))
self.assertEqual(l_web.WebPort, 8580)
self.assertEqual(len(l_web.Logins), 2)
self.assertEqual(l_web.Logins[0].Name, TESTING_LOGIN_NAME_0)
def test_04_WriteXML(self):
l_web = webXml.read_web_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.Computer.Web = l_web
# print(PrettyFormatAny.form(l_web, 'A2-04-A - Web Data'))
l_xml = webXml.write_web_xml(self.m_pyhouse_obj)
# print(PrettyFormatAny.form(l_xml, 'A2-04-B - XML'))
pass
class C1_Element(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_LONG))
l_web = webXml.read_web_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.Computer.Web = l_web
l_nodes = nodesXml.read_all_nodes_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.Computer.Nodes = l_nodes
        self.m_workspace = WorkspaceData
        self.m_workspace.m_pyhouse_obj = self.m_pyhouse_obj
def test_01_DoLogin(self):
pass
def test_02_ValidList(self):
        l_json = web_login.LoginElement(self.m_workspace).getValidLists()
l_test = json_tools.decode_json_unicode(l_json)
# print(PrettyFormatAny.form(l_test, 'C1-02-A - JSON', 40))
        self.assertEqual(l_test['Families'], VALID_FAMILIES)
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Web/test/test_web_login.py
|
Python
|
mit
| 3,972
|
[
"Brian"
] |
0c5972c63d916d19fb79f306d41752cca2e87fa1ce836fd03f8a66b4b939d3b4
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from concurrent import futures
from functools import partial, reduce
import json
from collections.abc import Collection
import numpy as np
import os
import re
import operator
import urllib.parse
import warnings
import pyarrow as pa
import pyarrow.lib as lib
import pyarrow._parquet as _parquet
from pyarrow._parquet import (ParquetReader, Statistics, # noqa
FileMetaData, RowGroupMetaData,
ColumnChunkMetaData,
ParquetSchema, ColumnSchema)
from pyarrow.fs import (LocalFileSystem, FileSystem,
_resolve_filesystem_and_path, _ensure_filesystem)
from pyarrow import filesystem as legacyfs
from pyarrow.util import guid, _is_path_like, _stringify_path
_URI_STRIP_SCHEMES = ('hdfs',)
def _parse_uri(path):
path = _stringify_path(path)
parsed_uri = urllib.parse.urlparse(path)
if parsed_uri.scheme in _URI_STRIP_SCHEMES:
return parsed_uri.path
else:
# ARROW-4073: On Windows returning the path with the scheme
# stripped removes the drive letter, if any
return path
def _get_filesystem_and_path(passed_filesystem, path):
if passed_filesystem is None:
return legacyfs.resolve_filesystem_and_path(path, passed_filesystem)
else:
passed_filesystem = legacyfs._ensure_filesystem(passed_filesystem)
parsed_path = _parse_uri(path)
return passed_filesystem, parsed_path
def _check_contains_null(val):
if isinstance(val, bytes):
for byte in val:
if isinstance(byte, bytes):
compare_to = chr(0)
else:
compare_to = 0
if byte == compare_to:
return True
elif isinstance(val, str):
return '\x00' in val
return False
def _check_filters(filters, check_null_strings=True):
"""
Check if filters are well-formed.
"""
if filters is not None:
if len(filters) == 0 or any(len(f) == 0 for f in filters):
raise ValueError("Malformed filters")
if isinstance(filters[0][0], str):
# We have encountered the situation where we have one nesting level
# too few:
# We have [(,,), ..] instead of [[(,,), ..]]
filters = [filters]
if check_null_strings:
for conjunction in filters:
for col, op, val in conjunction:
if (
isinstance(val, list) and
all(_check_contains_null(v) for v in val) or
_check_contains_null(val)
):
raise NotImplementedError(
"Null-terminated binary strings are not supported "
"as filter values."
)
return filters
_DNF_filter_doc = """Predicates are expressed in disjunctive normal form (DNF), like
``[[('x', '=', 0), ...], ...]``. DNF allows arbitrary boolean logical
combinations of single column predicates. The innermost tuples each
describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective, multiple-column
predicate. Finally, the outermost list combines these
filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
Examples:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
"""
def _filters_to_expression(filters):
"""
Check if filters are well-formed.
See _DNF_filter_doc above for more details.
"""
import pyarrow.dataset as ds
if isinstance(filters, ds.Expression):
return filters
filters = _check_filters(filters, check_null_strings=False)
def convert_single_predicate(col, op, val):
field = ds.field(col)
if op == "=" or op == "==":
return field == val
elif op == "!=":
return field != val
elif op == '<':
return field < val
elif op == '>':
return field > val
elif op == '<=':
return field <= val
elif op == '>=':
return field >= val
elif op == 'in':
return field.isin(val)
elif op == 'not in':
return ~field.isin(val)
else:
raise ValueError(
'"{0}" is not a valid operator in predicates.'.format(
(col, op, val)))
disjunction_members = []
for conjunction in filters:
conjunction_members = [
convert_single_predicate(col, op, val)
for col, op, val in conjunction
]
disjunction_members.append(reduce(operator.and_, conjunction_members))
return reduce(operator.or_, disjunction_members)
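# Editorial illustration (not part of the library): a minimal sketch of how a
# DNF filter list maps onto a dataset expression via _filters_to_expression
# above. The column names 'year' and 'city' are hypothetical, and the optional
# pyarrow.dataset module must be importable for this to work.
def _example_dnf_filters_to_expression():
    # [[A, B]] means "A AND B"; several inner lists in the outer list mean OR.
    filters = [[('year', '>=', 2019), ('city', 'in', {'NYC', 'SFO'})]]
    expr = _filters_to_expression(filters)
    # expr is equivalent to:
    #   (ds.field('year') >= 2019) & (ds.field('city').isin({'NYC', 'SFO'}))
    return expr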
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile:
"""
Reader interface for a single Parquet file.
Parameters
----------
source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
Readable source. For passing bytes or buffer-like file containing a
Parquet file, use pyarrow.BufferReader.
metadata : FileMetaData, default None
Use existing metadata object, rather than reading from file.
common_metadata : FileMetaData, default None
Will be used in reads for pandas schema metadata if not found in the
main file's metadata; it has no other uses at the moment.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
pre_buffer : bool, default False
Coalesce and issue file reads in parallel to improve performance on
high-latency filesystems (e.g. S3). If True, Arrow will use a
background I/O thread pool.
read_dictionary : list
List of column names to read directly as DictionaryArray.
coerce_int96_timestamp_unit : str, default None.
Cast timestamps that are stored in INT96 format to a particular
resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
and therefore INT96 timestamps will be inferred as timestamps
in nanoseconds.
"""
def __init__(self, source, metadata=None, common_metadata=None,
read_dictionary=None, memory_map=False, buffer_size=0,
pre_buffer=False, coerce_int96_timestamp_unit=None):
self.reader = ParquetReader()
self.reader.open(
source, use_memory_map=memory_map,
buffer_size=buffer_size, pre_buffer=pre_buffer,
read_dictionary=read_dictionary, metadata=metadata,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
self.common_metadata = common_metadata
self._nested_paths_by_prefix = self._build_nested_paths()
def _build_nested_paths(self):
paths = self.reader.column_paths
result = defaultdict(list)
for i, path in enumerate(paths):
key = path[0]
rest = path[1:]
while True:
result[key].append(i)
if not rest:
break
key = '.'.join((key, rest[0]))
rest = rest[1:]
return result
@property
def metadata(self):
return self.reader.metadata
@property
def schema(self):
"""
Return the Parquet schema, unconverted to Arrow types
"""
return self.metadata.schema
@property
def schema_arrow(self):
"""
Return the inferred Arrow schema, converted from the whole Parquet
file's schema
"""
return self.reader.schema_arrow
@property
def num_row_groups(self):
return self.reader.num_row_groups
def read_row_group(self, i, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
Read a single row group from a Parquet file.
Parameters
----------
i : int
Index of the individual row group that we want to read.
columns : list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the row group as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_group(i, column_indices=column_indices,
use_threads=use_threads)
def read_row_groups(self, row_groups, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
Read multiple row groups from a Parquet file.
Parameters
----------
row_groups : list
Only these row groups will be read from the file.
columns : list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the row groups as a table (of columns).
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_groups(row_groups,
column_indices=column_indices,
use_threads=use_threads)
def iter_batches(self, batch_size=65536, row_groups=None, columns=None,
use_threads=True, use_pandas_metadata=False):
"""
Read streaming batches from a Parquet file
Parameters
----------
batch_size : int, default 64K
Maximum number of records to yield per batch. Batches may be
smaller if there aren't enough rows in the file.
row_groups : list
Only these row groups will be read from the file.
columns : list
If not None, only these columns will be read from the file. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : boolean, default True
Perform multi-threaded column reads.
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
iterator of pyarrow.RecordBatch
Contents of each batch as a record batch
"""
if row_groups is None:
row_groups = range(0, self.metadata.num_row_groups)
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
batches = self.reader.iter_batches(batch_size,
row_groups=row_groups,
column_indices=column_indices,
use_threads=use_threads)
return batches
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read a Table from Parquet format.
Parameters
----------
columns : list
If not None, only these columns will be read from the file. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the file as a table (of columns).
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_all(column_indices=column_indices,
use_threads=use_threads)
def scan_contents(self, columns=None, batch_size=65536):
"""
Read contents of file for the given columns and batch size.
Notes
-----
This function's primary purpose is benchmarking.
The scan is executed on a single thread.
Parameters
----------
columns : list of integers, default None
Select columns to read, if None scan all columns.
batch_size : int, default 64K
Number of rows to read at a time internally.
Returns
-------
num_rows : number of rows in file
"""
column_indices = self._get_column_indices(columns)
return self.reader.scan_contents(column_indices,
batch_size=batch_size)
def _get_column_indices(self, column_names, use_pandas_metadata=False):
if column_names is None:
return None
indices = []
for name in column_names:
if name in self._nested_paths_by_prefix:
indices.extend(self._nested_paths_by_prefix[name])
if use_pandas_metadata:
file_keyvalues = self.metadata.metadata
common_keyvalues = (self.common_metadata.metadata
if self.common_metadata is not None
else None)
if file_keyvalues and b'pandas' in file_keyvalues:
index_columns = _get_pandas_index_columns(file_keyvalues)
elif common_keyvalues and b'pandas' in common_keyvalues:
index_columns = _get_pandas_index_columns(common_keyvalues)
else:
index_columns = []
if indices is not None and index_columns:
indices += [self.reader.column_name_idx(descr)
for descr in index_columns
if not isinstance(descr, dict)]
return indices
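# Editorial illustration (not part of the library): a minimal sketch of reading
# with ParquetFile, either the whole file, a single row group, or streaming
# batches. The path 'example.parquet' and the column names are hypothetical.
def _example_parquet_file_reads():
    pf = ParquetFile('example.parquet')
    whole = pf.read(columns=['a', 'b'])      # full table, two columns
    first = pf.read_row_group(0)             # only the first row group
    for batch in pf.iter_batches(batch_size=1024):
        pass                                 # each batch is a RecordBatch
    return whole, first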
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
def _sanitized_spark_field_name(name):
return _SPARK_DISALLOWED_CHARS.sub('_', name)
def _sanitize_schema(schema, flavor):
if 'spark' in flavor:
sanitized_fields = []
schema_changed = False
for field in schema:
name = field.name
sanitized_name = _sanitized_spark_field_name(name)
if sanitized_name != name:
schema_changed = True
sanitized_field = pa.field(sanitized_name, field.type,
field.nullable, field.metadata)
sanitized_fields.append(sanitized_field)
else:
sanitized_fields.append(field)
new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
return new_schema, schema_changed
else:
return schema, False
def _sanitize_table(table, new_schema, flavor):
# TODO: This will not handle prohibited characters in nested field names
if 'spark' in flavor:
column_data = [table[i] for i in range(table.num_columns)]
return pa.Table.from_arrays(column_data, schema=new_schema)
else:
return table
_parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "1.0"
Determine which Parquet logical types are available for use, whether the
reduced set from the Parquet 1.x.x format or the expanded logical types
added in later format versions.
Files written with version='2.4' or '2.6' may not be readable in all
Parquet implementations, so version='1.0' is likely the choice that
maximizes file compatibility.
UINT32 and some logical types are only available with version '2.4'.
Nanosecond timestamps are only available with version '2.6'.
Other features such as compression algorithms or the new serialized
data page format must be enabled separately (see 'compression' and
'data_page_version').
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
use_deprecated_int96_timestamps : bool, default None
Write timestamps to INT96 Parquet format. Defaults to False unless enabled
by the flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : str, default None
Cast timestamps to a particular resolution. If omitted, defaults are chosen
depending on `version`. By default, for ``version='1.0'`` (the default)
and ``version='2.4'``, nanoseconds are cast to microseconds ('us'), while
for other `version` values, they are written natively without loss
of resolution. Seconds are always cast to milliseconds ('ms') by default,
as Parquet does not have any temporal type with seconds resolution.
If the casting results in loss of data, it will raise an exception
unless ``allow_truncated_timestamps=True`` is given.
Valid values: {None, 'ms', 'us'}
data_page_size : int, default None
Set a target threshold for the approximate encoded size of data
pages within a column chunk (in bytes). If None, use the default data page
size of 1MByte.
allow_truncated_timestamps : bool, default False
Allow loss of data when coercing timestamps to a particular
resolution. E.g. if microsecond or nanosecond data is lost when coercing to
'ms', do not raise an exception. Passing ``allow_truncated_timestamps=True``
will NOT result in the truncation exception being ignored unless
``coerce_timestamps`` is not None.
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list
Specify if we should write statistics in general (default is True) or only
for some columns.
flavor : {'spark'}, default None
Sanitize schema or set other compatibility options to work with
various target systems.
filesystem : FileSystem, default None
If nothing passed, will be inferred from `where` if path-like, else
`where` is already a file-like object so no filesystem is needed.
compression_level : int or dict, default None
Specify the compression level for a codec, either on a general basis or
per-column. If None is passed, arrow selects the compression level for
the compression codec in use. The compression level has a different
meaning for each codec, so you have to read the documentation of the
codec you are using.
An exception is thrown if the compression codec does not allow specifying
a compression level.
use_byte_stream_split : bool or list, default False
Specify if the byte_stream_split encoding should be used in general or
only for some columns. If both dictionary and byte_stream_split are
enabled, then dictionary is preferred.
The byte_stream_split encoding is valid only for floating-point data types
and should be combined with a compression codec.
column_encoding : string or dict, default None
Specify the encoding scheme on a per column basis.
Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT'}.
Certain encodings are only compatible with certain data types.
Please refer to the encodings section of `Reading and writing Parquet
files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_.
data_page_version : {"1.0", "2.0"}, default "1.0"
The serialized Parquet data page format version to write, defaults to
1.0. This does not impact the file schema logical types and Arrow to
Parquet type casting behavior; for that use the "version" option.
use_compliant_nested_type : bool, default False
Whether to write compliant Parquet nested type (lists) as defined
`here <https://github.com/apache/parquet-format/blob/master/
LogicalTypes.md#nested-types>`_, defaults to ``False``.
For ``use_compliant_nested_type=True``, this will write into a list
with 3-level structure where the middle level, named ``list``,
is a repeated group with a single field named ``element``::
<list-repetition> group <name> (LIST) {
repeated group list {
<element-repetition> <element-type> element;
}
}
For ``use_compliant_nested_type=False``, this will also write into a list
with 3-level structure, where the name of the single field of the middle
level ``list`` is taken from the element name for nested columns in Arrow,
which defaults to ``item``::
<list-repetition> group <name> (LIST) {
repeated group list {
<element-repetition> <element-type> item;
}
}
"""
class ParquetWriter:
__doc__ = """
Class for incrementally building a Parquet file for Arrow tables.
Parameters
----------
where : path or file-like object
schema : pyarrow.Schema
{}
writer_engine_version : unused
**options : dict
If options contains a key `metadata_collector` then the
corresponding value is assumed to be a list (or any object with
`.append` method) that will be filled with the file metadata instance
of the written file.
""".format(_parquet_writer_arg_docs)
def __init__(self, where, schema, filesystem=None,
flavor=None,
version='1.0',
use_dictionary=True,
compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
compression_level=None,
use_byte_stream_split=False,
column_encoding=None,
writer_engine_version=None,
data_page_version='1.0',
use_compliant_nested_type=False,
**options):
if use_deprecated_int96_timestamps is None:
# Use int96 timestamps for Spark
if flavor is not None and 'spark' in flavor:
use_deprecated_int96_timestamps = True
else:
use_deprecated_int96_timestamps = False
self.flavor = flavor
if flavor is not None:
schema, self.schema_changed = _sanitize_schema(schema, flavor)
else:
self.schema_changed = False
self.schema = schema
self.where = where
# If we open a file using a filesystem, store file handle so we can be
# sure to close it when `self.close` is called.
self.file_handle = None
filesystem, path = _resolve_filesystem_and_path(
where, filesystem, allow_legacy_filesystem=True
)
if filesystem is not None:
if isinstance(filesystem, legacyfs.FileSystem):
# legacy filesystem (eg custom subclass)
# TODO deprecate
sink = self.file_handle = filesystem.open(path, 'wb')
else:
# ARROW-10480: do not auto-detect compression. While
# a filename like foo.parquet.gz is nonconforming, it
# shouldn't implicitly apply compression.
sink = self.file_handle = filesystem.open_output_stream(
path, compression=None)
else:
sink = where
self._metadata_collector = options.pop('metadata_collector', None)
engine_version = 'V2'
self.writer = _parquet.ParquetWriter(
sink, schema,
version=version,
compression=compression,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
compression_level=compression_level,
use_byte_stream_split=use_byte_stream_split,
column_encoding=column_encoding,
writer_engine_version=engine_version,
data_page_version=data_page_version,
use_compliant_nested_type=use_compliant_nested_type,
**options)
self.is_open = True
def __del__(self):
if getattr(self, 'is_open', False):
self.close()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
# return false since we want to propagate exceptions
return False
def write(self, table_or_batch, row_group_size=None):
"""
Write RecordBatch or Table to the Parquet file.
Parameters
----------
table_or_batch : {RecordBatch, Table}
row_group_size : int, default None
Maximum size of each written row group. If None, the
row group size will be the minimum of the input
table or batch length and 64 * 1024 * 1024.
"""
if isinstance(table_or_batch, pa.RecordBatch):
self.write_batch(table_or_batch, row_group_size)
elif isinstance(table_or_batch, pa.Table):
self.write_table(table_or_batch, row_group_size)
else:
raise TypeError(type(table_or_batch))
def write_batch(self, batch, row_group_size=None):
"""
Write RecordBatch to the Parquet file.
Parameters
----------
batch : RecordBatch
row_group_size : int, default None
Maximum size of each written row group. If None, the
row group size will be the minimum of the RecordBatch
size and 64 * 1024 * 1024.
"""
table = pa.Table.from_batches([batch], batch.schema)
self.write_table(table, row_group_size)
def write_table(self, table, row_group_size=None):
"""
Write Table to the Parquet file.
Parameters
----------
table : Table
row_group_size : int, default None
Maximum size of each written row group. If None, the
row group size will be the minimum of the Table size
and 64 * 1024 * 1024.
"""
if self.schema_changed:
table = _sanitize_table(table, self.schema, self.flavor)
assert self.is_open
if not table.schema.equals(self.schema, check_metadata=False):
msg = ('Table schema does not match schema used to create file: '
'\ntable:\n{!s} vs. \nfile:\n{!s}'
.format(table.schema, self.schema))
raise ValueError(msg)
self.writer.write_table(table, row_group_size=row_group_size)
def close(self):
if self.is_open:
self.writer.close()
self.is_open = False
if self._metadata_collector is not None:
self._metadata_collector.append(self.writer.metadata)
if self.file_handle is not None:
self.file_handle.close()
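# Editorial illustration (not part of the library): a minimal sketch of
# incrementally writing row groups with ParquetWriter. The output path and the
# example table are hypothetical.
def _example_incremental_write():
    table = pa.table({'x': [1, 2, 3]})
    with ParquetWriter('example_out.parquet', table.schema) as writer:
        writer.write_table(table)   # first row group
        writer.write_table(table)   # second row group, same schema required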
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece:
"""
DEPRECATED: A single chunk of a potentially larger Parquet dataset to read.
The arguments will indicate to read either a single row group or all row
groups, and whether to add partition keys to the resulting pyarrow.Table.
.. deprecated:: 5.0
Directly constructing a ``ParquetDatasetPiece`` is deprecated, as well
as accessing the pieces of a ``ParquetDataset`` object. Specify
``use_legacy_dataset=False`` when constructing the ``ParquetDataset``
and use the ``ParquetDataset.fragments`` attribute instead.
Parameters
----------
path : str or pathlib.Path
Path to file in the file system where this piece is located.
open_file_func : callable
Function to use for obtaining file handle to dataset piece.
partition_keys : list of tuples
Two-element tuples of ``(column name, ordinal index)``.
row_group : int, default None
Row group to load. By default, reads all row groups.
file_options : dict
Options passed through to ParquetFile when opening this piece.
"""
def __init__(self, path, open_file_func=partial(open, mode='rb'),
file_options=None, row_group=None, partition_keys=None):
warnings.warn(
"ParquetDatasetPiece is deprecated as of pyarrow 5.0.0 and will "
"be removed in a future version.",
DeprecationWarning, stacklevel=2)
self._init(
path, open_file_func, file_options, row_group, partition_keys)
@staticmethod
def _create(path, open_file_func=partial(open, mode='rb'),
file_options=None, row_group=None, partition_keys=None):
self = ParquetDatasetPiece.__new__(ParquetDatasetPiece)
self._init(
path, open_file_func, file_options, row_group, partition_keys)
return self
def _init(self, path, open_file_func, file_options, row_group,
partition_keys):
self.path = _stringify_path(path)
self.open_file_func = open_file_func
self.row_group = row_group
self.partition_keys = partition_keys or []
self.file_options = file_options or {}
def __eq__(self, other):
if not isinstance(other, ParquetDatasetPiece):
return False
return (self.path == other.path and
self.row_group == other.row_group and
self.partition_keys == other.partition_keys)
def __repr__(self):
return ('{}({!r}, row_group={!r}, partition_keys={!r})'
.format(type(self).__name__, self.path,
self.row_group,
self.partition_keys))
def __str__(self):
result = ''
if len(self.partition_keys) > 0:
partition_str = ', '.join('{}={}'.format(name, index)
for name, index in self.partition_keys)
result += 'partition[{}] '.format(partition_str)
result += self.path
if self.row_group is not None:
result += ' | row_group={}'.format(self.row_group)
return result
def get_metadata(self):
"""
Return the file's metadata.
Returns
-------
metadata : FileMetaData
"""
f = self.open()
return f.metadata
def open(self):
"""
Return instance of ParquetFile.
"""
reader = self.open_file_func(self.path)
if not isinstance(reader, ParquetFile):
reader = ParquetFile(reader, **self.file_options)
return reader
def read(self, columns=None, use_threads=True, partitions=None,
file=None, use_pandas_metadata=False):
"""
Read this piece as a pyarrow.Table.
Parameters
----------
columns : list of column names, default None
use_threads : bool, default True
Perform multi-threaded column reads.
partitions : ParquetPartitions, default None
file : file-like object
Passed to ParquetFile.
use_pandas_metadata : bool
If pandas metadata should be used or not.
Returns
-------
table : pyarrow.Table
"""
if self.open_file_func is not None:
reader = self.open()
elif file is not None:
reader = ParquetFile(file, **self.file_options)
else:
# try to read the local path
reader = ParquetFile(self.path, **self.file_options)
options = dict(columns=columns,
use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if self.row_group is not None:
table = reader.read_row_group(self.row_group, **options)
else:
table = reader.read(**options)
if len(self.partition_keys) > 0:
if partitions is None:
raise ValueError('Must pass partition sets')
# Here, the index is the categorical code of the partition where
# this piece is located. Suppose we had
#
# /foo=a/0.parq
# /foo=b/0.parq
# /foo=c/0.parq
#
# Then we assign a=0, b=1, c=2. And the resulting Table pieces will
# have a DictionaryArray column named foo having the constant index
# value as indicated. The distinct categories of the partition have
# been computed in the ParquetManifest
for i, (name, index) in enumerate(self.partition_keys):
# The partition code is the same for all values in this piece
indices = np.full(len(table), index, dtype='i4')
# This is set of all partition values, computed as part of the
# manifest, so ['a', 'b', 'c'] as in our example above.
dictionary = partitions.levels[i].dictionary
arr = pa.DictionaryArray.from_arrays(indices, dictionary)
table = table.append_column(name, arr)
return table
class PartitionSet:
"""
A data structure for cataloguing the observed Parquet partitions at a
particular level. So if we have
/foo=a/bar=0
/foo=a/bar=1
/foo=a/bar=2
/foo=b/bar=0
/foo=b/bar=1
/foo=b/bar=2
Then we have two partition sets, one for foo, another for bar. As we visit
levels of the partition hierarchy, a PartitionSet tracks the distinct
values and assigns categorical codes to use when reading the pieces
Parameters
----------
name : str
Name of the partition set. Under which key to collect all values.
keys : list
All possible values that have been collected for that partition set.
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or []
self.key_indices = {k: i for i, k in enumerate(self.keys)}
self._dictionary = None
def get_index(self, key):
"""
Get the index of the partition value if it is known, otherwise assign
one
Parameters
----------
key : The value for which we want to know the index.
"""
if key in self.key_indices:
return self.key_indices[key]
else:
index = len(self.key_indices)
self.keys.append(key)
self.key_indices[key] = index
return index
@property
def dictionary(self):
if self._dictionary is not None:
return self._dictionary
if len(self.keys) == 0:
raise ValueError('No known partition keys')
# Only integer and string partition types are supported right now
try:
integer_keys = [int(x) for x in self.keys]
dictionary = lib.array(integer_keys)
except ValueError:
dictionary = lib.array(self.keys)
self._dictionary = dictionary
return dictionary
@property
def is_sorted(self):
return list(self.keys) == sorted(self.keys)
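# Editorial illustration (not part of the library): how PartitionSet assigns
# categorical codes to the partition values it observes. Values are
# hypothetical.
def _example_partition_set_codes():
    ps = PartitionSet('year')
    codes = [ps.get_index(v) for v in ('2019', '2020', '2019')]  # [0, 1, 0]
    # keys that parse as integers are stored as an integer Arrow array
    return codes, ps.dictionary  # dictionary -> array of [2019, 2020]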
class ParquetPartitions:
def __init__(self):
self.levels = []
self.partition_names = set()
def __len__(self):
return len(self.levels)
def __getitem__(self, i):
return self.levels[i]
def equals(self, other):
if not isinstance(other, ParquetPartitions):
raise TypeError('`other` must be an instance of ParquetPartitions')
return (self.levels == other.levels and
self.partition_names == other.partition_names)
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def get_index(self, level, name, key):
"""
Record a partition value at a particular level, returning the distinct
code for that value at that level.
Examples
--------
partitions.get_index(1, 'foo', 'a') returns 0
partitions.get_index(1, 'foo', 'b') returns 1
partitions.get_index(1, 'foo', 'c') returns 2
partitions.get_index(1, 'foo', 'a') returns 0
Parameters
----------
level : int
The nesting level of the partition we are observing
name : str
The partition name
key : str or int
The partition value
"""
if level == len(self.levels):
if name in self.partition_names:
raise ValueError('{} was the name of the partition in '
'another level'.format(name))
part_set = PartitionSet(name)
self.levels.append(part_set)
self.partition_names.add(name)
return self.levels[level].get_index(key)
def filter_accepts_partition(self, part_key, filter, level):
p_column, p_value_index = part_key
f_column, op, f_value = filter
if p_column != f_column:
return True
f_type = type(f_value)
if op in {'in', 'not in'}:
if not isinstance(f_value, Collection):
raise TypeError(
"'%s' object is not a collection", f_type.__name__)
if not f_value:
raise ValueError("Cannot use empty collection as filter value")
if len({type(item) for item in f_value}) != 1:
raise ValueError("All elements of the collection '%s' must be"
" of same type", f_value)
f_type = type(next(iter(f_value)))
elif not isinstance(f_value, str) and isinstance(f_value, Collection):
raise ValueError(
"Op '%s' not supported with a collection value", op)
p_value = f_type(self.levels[level]
.dictionary[p_value_index].as_py())
if op == "=" or op == "==":
return p_value == f_value
elif op == "!=":
return p_value != f_value
elif op == '<':
return p_value < f_value
elif op == '>':
return p_value > f_value
elif op == '<=':
return p_value <= f_value
elif op == '>=':
return p_value >= f_value
elif op == 'in':
return p_value in f_value
elif op == 'not in':
return p_value not in f_value
else:
raise ValueError("'%s' is not a valid operator in predicates.",
filter[1])
class ParquetManifest:
def __init__(self, dirpath, open_file_func=None, filesystem=None,
pathsep='/', partition_scheme='hive', metadata_nthreads=1):
filesystem, dirpath = _get_filesystem_and_path(filesystem, dirpath)
self.filesystem = filesystem
self.open_file_func = open_file_func
self.pathsep = pathsep
self.dirpath = _stringify_path(dirpath)
self.partition_scheme = partition_scheme
self.partitions = ParquetPartitions()
self.pieces = []
self._metadata_nthreads = metadata_nthreads
self._thread_pool = futures.ThreadPoolExecutor(
max_workers=metadata_nthreads)
self.common_metadata_path = None
self.metadata_path = None
self._visit_level(0, self.dirpath, [])
# Due to concurrency, pieces will potentially be out of order if the
# dataset is partitioned, so we sort them to yield stable results
self.pieces.sort(key=lambda piece: piece.path)
if self.common_metadata_path is None:
# _common_metadata is a subset of _metadata
self.common_metadata_path = self.metadata_path
self._thread_pool.shutdown()
def _visit_level(self, level, base_path, part_keys):
fs = self.filesystem
_, directories, files = next(fs.walk(base_path))
filtered_files = []
for path in files:
full_path = self.pathsep.join((base_path, path))
if path.endswith('_common_metadata'):
self.common_metadata_path = full_path
elif path.endswith('_metadata'):
self.metadata_path = full_path
elif self._should_silently_exclude(path):
continue
else:
filtered_files.append(full_path)
# ARROW-1079: Filter out "private" directories starting with underscore
filtered_directories = [self.pathsep.join((base_path, x))
for x in directories
if not _is_private_directory(x)]
filtered_files.sort()
filtered_directories.sort()
if len(filtered_files) > 0 and len(filtered_directories) > 0:
raise ValueError('Found files in an intermediate '
'directory: {}'.format(base_path))
elif len(filtered_directories) > 0:
self._visit_directories(level, filtered_directories, part_keys)
else:
self._push_pieces(filtered_files, part_keys)
def _should_silently_exclude(self, file_name):
return (file_name.endswith('.crc') or # Checksums
file_name.endswith('_$folder$') or # HDFS directories in S3
file_name.startswith('.') or # Hidden files starting with .
file_name.startswith('_') or # Hidden files starting with _
file_name in EXCLUDED_PARQUET_PATHS)
def _visit_directories(self, level, directories, part_keys):
futures_list = []
for path in directories:
head, tail = _path_split(path, self.pathsep)
name, key = _parse_hive_partition(tail)
index = self.partitions.get_index(level, name, key)
dir_part_keys = part_keys + [(name, index)]
# If you have less threads than levels, the wait call will block
# indefinitely due to multiple waits within a thread.
if level < self._metadata_nthreads:
future = self._thread_pool.submit(self._visit_level,
level + 1,
path,
dir_part_keys)
futures_list.append(future)
else:
self._visit_level(level + 1, path, dir_part_keys)
if futures_list:
futures.wait(futures_list)
def _parse_partition(self, dirname):
if self.partition_scheme == 'hive':
return _parse_hive_partition(dirname)
else:
raise NotImplementedError('partition schema: {}'
.format(self.partition_scheme))
def _push_pieces(self, files, part_keys):
self.pieces.extend([
ParquetDatasetPiece._create(path, partition_keys=part_keys,
open_file_func=self.open_file_func)
for path in files
])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {}'.format(value))
return value.split('=', 1)
def _is_private_directory(x):
_, tail = os.path.split(x)
return (tail.startswith('_') or tail.startswith('.')) and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
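# Editorial illustration (not part of the library): how hive-style directory
# names are split into (name, value) pairs during manifest discovery. The
# paths are hypothetical.
def _example_hive_partition_parsing():
    name, value = _parse_hive_partition('year=2020')   # 'year', '2020'
    head, tail = _path_split('/data/year=2020', '/')   # '/data', 'year=2020'
    return name, value, head, tail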
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
class _ParquetDatasetMetadata:
__slots__ = ('fs', 'memory_map', 'read_dictionary', 'common_metadata',
'buffer_size')
def _open_dataset_file(dataset, path, meta=None):
if (dataset.fs is not None and
not isinstance(dataset.fs, legacyfs.LocalFileSystem)):
path = dataset.fs.open(path, mode='rb')
return ParquetFile(
path,
metadata=meta,
memory_map=dataset.memory_map,
read_dictionary=dataset.read_dictionary,
common_metadata=dataset.common_metadata,
buffer_size=dataset.buffer_size
)
_DEPR_MSG = (
"'{}' attribute is deprecated as of pyarrow 5.0.0 and will be removed "
"in a future version.{}"
)
_read_docstring_common = """\
read_dictionary : list, default None
List of names or column paths (for nested types) to read directly
as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
a flat column as dictionary-encoded pass the column name. For
nested types, you must pass the full column "path", which could be
something like level1.level2.list.item. Refer to the Parquet
file's schema to obtain the paths.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
partitioning : pyarrow.dataset.Partitioning or str or list of str, \
default "hive"
The partitioning scheme for a partitioned dataset. The default of "hive"
assumes directory names with key=value pairs like "/year=2009/month=11".
In addition, a scheme like "/2009/11" is also supported, in which case
you need to specify the field names or a full schema. See the
``pyarrow.dataset.partitioning()`` function for more details."""
class ParquetDataset:
__doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.
Parameters
----------
path_or_paths : str or List[str]
A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem.
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas.
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter.
split_row_groups : bool, default False
Divide files into pieces for each row group in the file.
validate_schema : bool, default True
Check that individual file schemas are all the same / compatible.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
setting `use_legacy_dataset` to False, also within-file level filtering
and different partitioning schemes are supported.
{1}
metadata_nthreads : int, default 1
How many threads to allow in the thread pool that is used to read the
dataset metadata. Increasing this is helpful when reading partitioned
datasets.
{0}
use_legacy_dataset : bool, default True
Set to False to enable the new code path (experimental, using the
new Arrow Dataset API). Among other things, this allows passing
`filters` for all columns and not only the partition keys, enables
different partitioning schemes, etc.
pre_buffer : bool, default True
Coalesce and issue file reads in parallel to improve performance on
high-latency filesystems (e.g. S3). If True, Arrow will use a
background I/O thread pool. This option is only supported for
use_legacy_dataset=False. If using a filesystem layer that itself
performs readahead (e.g. fsspec's S3FS), disable readahead for best
results.
coerce_int96_timestamp_unit : str, default None.
Cast timestamps that are stored in INT96 format to a particular resolution
(e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96
timestamps will be inferred as timestamps in nanoseconds.
""".format(_read_docstring_common, _DNF_filter_doc)
def __new__(cls, path_or_paths=None, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, read_dictionary=None,
memory_map=False, buffer_size=0, partitioning="hive",
use_legacy_dataset=None, pre_buffer=True,
coerce_int96_timestamp_unit=None):
if use_legacy_dataset is None:
# if a new filesystem is passed -> default to new implementation
if isinstance(filesystem, FileSystem):
use_legacy_dataset = False
# otherwise the default is still True
else:
use_legacy_dataset = True
if not use_legacy_dataset:
return _ParquetDatasetV2(
path_or_paths, filesystem=filesystem,
filters=filters,
partitioning=partitioning,
read_dictionary=read_dictionary,
memory_map=memory_map,
buffer_size=buffer_size,
pre_buffer=pre_buffer,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
# unsupported keywords
schema=schema, metadata=metadata,
split_row_groups=split_row_groups,
validate_schema=validate_schema,
metadata_nthreads=metadata_nthreads
)
self = object.__new__(cls)
return self
def __init__(self, path_or_paths, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, read_dictionary=None,
memory_map=False, buffer_size=0, partitioning="hive",
use_legacy_dataset=True, pre_buffer=True,
coerce_int96_timestamp_unit=None):
if partitioning != "hive":
raise ValueError(
'Only "hive" for hive-like partitioning is supported when '
'using use_legacy_dataset=True')
self._metadata = _ParquetDatasetMetadata()
a_path = path_or_paths
if isinstance(a_path, list):
a_path = a_path[0]
self._metadata.fs, _ = _get_filesystem_and_path(filesystem, a_path)
if isinstance(path_or_paths, list):
self.paths = [_parse_uri(path) for path in path_or_paths]
else:
self.paths = _parse_uri(path_or_paths)
self._metadata.read_dictionary = read_dictionary
self._metadata.memory_map = memory_map
self._metadata.buffer_size = buffer_size
(self._pieces,
self._partitions,
self.common_metadata_path,
self.metadata_path) = _make_manifest(
path_or_paths, self._fs, metadata_nthreads=metadata_nthreads,
open_file_func=partial(_open_dataset_file, self._metadata)
)
if self.common_metadata_path is not None:
with self._fs.open(self.common_metadata_path) as f:
self._metadata.common_metadata = read_metadata(
f,
memory_map=memory_map
)
else:
self._metadata.common_metadata = None
if metadata is None and self.metadata_path is not None:
with self._fs.open(self.metadata_path) as f:
self.metadata = read_metadata(f, memory_map=memory_map)
else:
self.metadata = metadata
self.schema = schema
self.split_row_groups = split_row_groups
if split_row_groups:
raise NotImplementedError("split_row_groups not yet implemented")
if filters is not None:
filters = _check_filters(filters)
self._filter(filters)
if validate_schema:
self.validate_schemas()
def equals(self, other):
if not isinstance(other, ParquetDataset):
raise TypeError('`other` must be an instance of ParquetDataset')
if self._fs.__class__ != other._fs.__class__:
return False
for prop in ('paths', '_pieces', '_partitions',
'common_metadata_path', 'metadata_path',
'common_metadata', 'metadata', 'schema',
'split_row_groups'):
if getattr(self, prop) != getattr(other, prop):
return False
for prop in ('memory_map', 'buffer_size'):
if getattr(self._metadata, prop) != getattr(other._metadata, prop):
return False
return True
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def validate_schemas(self):
if self.metadata is None and self.schema is None:
if self.common_metadata is not None:
self.schema = self.common_metadata.schema
else:
self.schema = self._pieces[0].get_metadata().schema
elif self.schema is None:
self.schema = self.metadata.schema
# Verify schemas are all compatible
dataset_schema = self.schema.to_arrow_schema()
# Exclude the partition columns from the schema, they are provided
# by the path, not the DatasetPiece
if self._partitions is not None:
for partition_name in self._partitions.partition_names:
if dataset_schema.get_field_index(partition_name) != -1:
field_idx = dataset_schema.get_field_index(partition_name)
dataset_schema = dataset_schema.remove(field_idx)
for piece in self._pieces:
file_metadata = piece.get_metadata()
file_schema = file_metadata.schema.to_arrow_schema()
if not dataset_schema.equals(file_schema, check_metadata=False):
raise ValueError('Schema in {!s} was different. \n'
'{!s}\n\nvs\n\n{!s}'
.format(piece, file_schema,
dataset_schema))
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read multiple Parquet files as a single pyarrow.Table.
Parameters
----------
columns : List[str]
Names of columns to read from the file.
use_threads : bool, default True
Perform multi-threaded column reads
use_pandas_metadata : bool, default False
Passed through to each dataset piece.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
tables = []
for piece in self._pieces:
table = piece.read(columns=columns, use_threads=use_threads,
partitions=self._partitions,
use_pandas_metadata=use_pandas_metadata)
tables.append(table)
all_data = lib.concat_tables(tables)
if use_pandas_metadata:
# We need to ensure that this metadata is set in the Table's schema
# so that Table.to_pandas will construct pandas.DataFrame with the
# right index
common_metadata = self._get_common_pandas_metadata()
current_metadata = all_data.schema.metadata or {}
if common_metadata and b'pandas' not in current_metadata:
all_data = all_data.replace_schema_metadata({
b'pandas': common_metadata})
return all_data
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details.
Parameters
----------
**kwargs : optional
All additional options to pass to the reader.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
return self.read(use_pandas_metadata=True, **kwargs)
def _get_common_pandas_metadata(self):
if self.common_metadata is None:
return None
keyvalues = self.common_metadata.metadata
return keyvalues.get(b'pandas', None)
def _filter(self, filters):
accepts_filter = self._partitions.filter_accepts_partition
def one_filter_accepts(piece, filter):
return all(accepts_filter(part_key, filter, level)
for level, part_key in enumerate(piece.partition_keys))
def all_filters_accept(piece):
return any(all(one_filter_accepts(piece, f) for f in conjunction)
for conjunction in filters)
self._pieces = [p for p in self._pieces if all_filters_accept(p)]
@property
def pieces(self):
warnings.warn(
_DEPR_MSG.format(
"ParquetDataset.pieces",
" Specify 'use_legacy_dataset=False' while constructing the "
"ParquetDataset, and then use the '.fragments' attribute "
"instead."),
DeprecationWarning, stacklevel=2)
return self._pieces
@property
def partitions(self):
warnings.warn(
_DEPR_MSG.format(
"ParquetDataset.partitions",
" Specify 'use_legacy_dataset=False' while constructing the "
"ParquetDataset, and then use the '.partitioning' attribute "
"instead."),
DeprecationWarning, stacklevel=2)
return self._partitions
@property
def memory_map(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.memory_map", ""),
DeprecationWarning, stacklevel=2)
return self._metadata.memory_map
@property
def read_dictionary(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.read_dictionary", ""),
DeprecationWarning, stacklevel=2)
return self._metadata.read_dictionary
@property
def buffer_size(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.buffer_size", ""),
DeprecationWarning, stacklevel=2)
return self._metadata.buffer_size
_fs = property(
operator.attrgetter('_metadata.fs')
)
@property
def fs(self):
warnings.warn(
_DEPR_MSG.format(
"ParquetDataset.fs",
" Specify 'use_legacy_dataset=False' while constructing the "
"ParquetDataset, and then use the '.filesystem' attribute "
"instead."),
DeprecationWarning, stacklevel=2)
return self._metadata.fs
common_metadata = property(
operator.attrgetter('_metadata.common_metadata')
)
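# Editorial illustration (not part of the library): a minimal sketch of reading
# a partitioned directory of Parquet files with ParquetDataset and a partition
# filter, assuming 'year' is a partition key. Paths and column names are
# hypothetical.
def _example_parquet_dataset_read():
    dataset = ParquetDataset('example_dataset/',
                             filters=[('year', '>=', 2020)])
    return dataset.read(columns=['a', 'b'])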
def _make_manifest(path_or_paths, fs, pathsep='/', metadata_nthreads=1,
open_file_func=None):
partitions = None
common_metadata_path = None
metadata_path = None
if isinstance(path_or_paths, list) and len(path_or_paths) == 1:
# Dask passes a directory as a list of length 1
path_or_paths = path_or_paths[0]
if _is_path_like(path_or_paths) and fs.isdir(path_or_paths):
manifest = ParquetManifest(path_or_paths, filesystem=fs,
open_file_func=open_file_func,
pathsep=getattr(fs, "pathsep", "/"),
metadata_nthreads=metadata_nthreads)
common_metadata_path = manifest.common_metadata_path
metadata_path = manifest.metadata_path
pieces = manifest.pieces
partitions = manifest.partitions
else:
if not isinstance(path_or_paths, list):
path_or_paths = [path_or_paths]
# List of paths
if len(path_or_paths) == 0:
raise ValueError('Must pass at least one file path')
pieces = []
for path in path_or_paths:
if not fs.isfile(path):
raise OSError('Passed non-file path: {}'
.format(path))
piece = ParquetDatasetPiece._create(
path, open_file_func=open_file_func)
pieces.append(piece)
return pieces, partitions, common_metadata_path, metadata_path
def _is_local_file_system(fs):
return isinstance(fs, LocalFileSystem) or isinstance(
fs, legacyfs.LocalFileSystem
)
class _ParquetDatasetV2:
"""
ParquetDataset shim using the Dataset API under the hood.
"""
def __init__(self, path_or_paths, filesystem=None, filters=None,
partitioning="hive", read_dictionary=None, buffer_size=None,
memory_map=False, ignore_prefixes=None, pre_buffer=True,
coerce_int96_timestamp_unit=None, **kwargs):
import pyarrow.dataset as ds
# Raise error for not supported keywords
for keyword, default in [
("schema", None), ("metadata", None),
("split_row_groups", False), ("validate_schema", True),
("metadata_nthreads", 1)]:
if keyword in kwargs and kwargs[keyword] is not default:
raise ValueError(
"Keyword '{0}' is not yet supported with the new "
"Dataset API".format(keyword))
# map format arguments
read_options = {
"pre_buffer": pre_buffer,
"coerce_int96_timestamp_unit": coerce_int96_timestamp_unit
}
if buffer_size:
read_options.update(use_buffered_stream=True,
buffer_size=buffer_size)
if read_dictionary is not None:
read_options.update(dictionary_columns=read_dictionary)
# map filters to Expressions
self._filters = filters
self._filter_expression = filters and _filters_to_expression(filters)
# map old filesystems to new one
if filesystem is not None:
filesystem = _ensure_filesystem(
filesystem, use_mmap=memory_map)
elif filesystem is None and memory_map:
# if memory_map is specified, assume local file system (string
# path can in principle be URI for any filesystem)
filesystem = LocalFileSystem(use_mmap=memory_map)
# This needs to be checked after _ensure_filesystem, because that
# handles the case of an fsspec LocalFileSystem
if (
hasattr(path_or_paths, "__fspath__") and
filesystem is not None and
not _is_local_file_system(filesystem)
):
raise TypeError(
"Path-like objects with __fspath__ must only be used with "
f"local file systems, not {type(filesystem)}"
)
# check for single fragment dataset
single_file = None
if isinstance(path_or_paths, list):
if len(path_or_paths) == 1:
single_file = path_or_paths[0]
else:
if _is_path_like(path_or_paths):
path_or_paths = _stringify_path(path_or_paths)
if filesystem is None:
# path might be a URI describing the FileSystem as well
try:
filesystem, path_or_paths = FileSystem.from_uri(
path_or_paths)
except ValueError:
filesystem = LocalFileSystem(use_mmap=memory_map)
if filesystem.get_file_info(path_or_paths).is_file:
single_file = path_or_paths
else:
single_file = path_or_paths
if single_file is not None:
self._enable_parallel_column_conversion = True
read_options.update(enable_parallel_column_conversion=True)
parquet_format = ds.ParquetFileFormat(**read_options)
fragment = parquet_format.make_fragment(single_file, filesystem)
self._dataset = ds.FileSystemDataset(
[fragment], schema=fragment.physical_schema,
format=parquet_format,
filesystem=fragment.filesystem
)
return
else:
self._enable_parallel_column_conversion = False
parquet_format = ds.ParquetFileFormat(**read_options)
# check partitioning to enable dictionary encoding
if partitioning == "hive":
partitioning = ds.HivePartitioning.discover(
infer_dictionary=True)
self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
format=parquet_format,
partitioning=partitioning,
ignore_prefixes=ignore_prefixes)
@property
def schema(self):
return self._dataset.schema
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read (multiple) Parquet files as a single pyarrow.Table.
Parameters
----------
columns : List[str]
Names of columns to read from the dataset. The partition fields
are not automatically included (in contrast to when setting
``use_legacy_dataset=True``).
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
# if use_pandas_metadata, we need to include index columns in the
# column selection, to be able to restore those in the pandas DataFrame
metadata = self.schema.metadata
if columns is not None and use_pandas_metadata:
if metadata and b'pandas' in metadata:
# RangeIndex can be represented as dict instead of column name
index_columns = [
col for col in _get_pandas_index_columns(metadata)
if not isinstance(col, dict)
]
columns = (
list(columns) + list(set(index_columns) - set(columns))
)
if self._enable_parallel_column_conversion:
if use_threads:
# Allow per-column parallelism; would otherwise cause
# contention in the presence of per-file parallelism.
use_threads = False
table = self._dataset.to_table(
columns=columns, filter=self._filter_expression,
use_threads=use_threads
)
# if use_pandas_metadata, restore the pandas metadata (which gets
# lost if doing a specific `columns` selection in to_table)
if use_pandas_metadata:
if metadata and b"pandas" in metadata:
new_metadata = table.schema.metadata or {}
new_metadata.update({b"pandas": metadata[b"pandas"]})
table = table.replace_schema_metadata(new_metadata)
return table
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details.
"""
return self.read(use_pandas_metadata=True, **kwargs)
@property
def pieces(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.pieces",
" Use the '.fragments' attribute instead"),
DeprecationWarning, stacklevel=2)
return list(self._dataset.get_fragments())
@property
def fragments(self):
return list(self._dataset.get_fragments())
@property
def files(self):
return self._dataset.files
@property
def filesystem(self):
return self._dataset.filesystem
@property
def partitioning(self):
"""
The partitioning of the Dataset source, if discovered.
"""
return self._dataset.partitioning
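# Editorial illustration (not part of the library): the dataset-API code path
# above is reached by passing use_legacy_dataset=False to ParquetDataset,
# which dispatches to _ParquetDatasetV2 in ParquetDataset.__new__. Paths and
# column names are hypothetical; pyarrow.dataset must be available.
def _example_dataset_v2_read():
    dataset = ParquetDataset('example_dataset/', use_legacy_dataset=False,
                             filters=[('year', '>=', 2020)])
    return dataset.read(columns=['a'], use_pandas_metadata=True)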
_read_table_docstring = """
{0}
Parameters
----------
source : str, pyarrow.NativeFile, or file-like object
If a string passed, can be a single file name or directory name. For
file-like objects, only read a single file. Use pyarrow.BufferReader to
read a file contained in a bytes or buffer-like object.
columns : list
If not None, only these columns will be read from the file. A column
name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
'a.c', and 'a.d.e'. If empty, no columns will be read. Note
that the table will still have the correct num_rows set despite having
no columns.
use_threads : bool, default True
Perform multi-threaded column reads.
metadata : FileMetaData
If separately computed
{1}
use_legacy_dataset : bool, default False
By default, `read_table` uses the new Arrow Datasets API since
pyarrow 1.0.0. Among other things, this allows passing `filters`
for all columns and not only the partition keys, enables
different partitioning schemes, etc.
Set to True to use the legacy behaviour.
ignore_prefixes : list, optional
Files matching any of these prefixes will be ignored by the
discovery process if use_legacy_dataset=False.
This is matched to the basename of a path.
By default this is ['.', '_'].
Note that discovery happens only if a directory is passed as source.
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
setting `use_legacy_dataset` to False, also within-file level filtering
and different partitioning schemes are supported.
{3}
pre_buffer : bool, default True
Coalesce and issue file reads in parallel to improve performance on
high-latency filesystems (e.g. S3). If True, Arrow will use a
background I/O thread pool. This option is only supported for
use_legacy_dataset=False. If using a filesystem layer that itself
performs readahead (e.g. fsspec's S3FS), disable readahead for best
results.
coerce_int96_timestamp_unit : str, default None.
Cast timestamps that are stored in INT96 format to a particular
resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
and therefore INT96 timestamps will be inferred as timestamps
in nanoseconds.
Returns
-------
{2}
"""
def read_table(source, columns=None, use_threads=True, metadata=None,
use_pandas_metadata=False, memory_map=False,
read_dictionary=None, filesystem=None, filters=None,
buffer_size=0, partitioning="hive", use_legacy_dataset=False,
ignore_prefixes=None, pre_buffer=True,
coerce_int96_timestamp_unit=None):
if not use_legacy_dataset:
if metadata is not None:
raise ValueError(
"The 'metadata' keyword is no longer supported with the new "
"datasets-based implementation. Specify "
"'use_legacy_dataset=True' to temporarily recover the old "
"behaviour."
)
try:
dataset = _ParquetDatasetV2(
source,
filesystem=filesystem,
partitioning=partitioning,
memory_map=memory_map,
read_dictionary=read_dictionary,
buffer_size=buffer_size,
filters=filters,
ignore_prefixes=ignore_prefixes,
pre_buffer=pre_buffer,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
except ImportError:
# fall back on ParquetFile for simple cases when pyarrow.dataset
# module is not available
if filters is not None:
raise ValueError(
"the 'filters' keyword is not supported when the "
"pyarrow.dataset module is not available"
)
if partitioning != "hive":
raise ValueError(
"the 'partitioning' keyword is not supported when the "
"pyarrow.dataset module is not available"
)
filesystem, path = _resolve_filesystem_and_path(source, filesystem)
if filesystem is not None:
source = filesystem.open_input_file(path)
# TODO test that source is not a directory or a list
dataset = ParquetFile(
source, metadata=metadata, read_dictionary=read_dictionary,
memory_map=memory_map, buffer_size=buffer_size,
pre_buffer=pre_buffer,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
return dataset.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if ignore_prefixes is not None:
raise ValueError(
"The 'ignore_prefixes' keyword is only supported when "
"use_legacy_dataset=False")
if _is_path_like(source):
pf = ParquetDataset(
source, metadata=metadata, memory_map=memory_map,
read_dictionary=read_dictionary,
buffer_size=buffer_size,
filesystem=filesystem, filters=filters,
partitioning=partitioning,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
else:
pf = ParquetFile(
source, metadata=metadata,
read_dictionary=read_dictionary,
memory_map=memory_map,
buffer_size=buffer_size,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
return pf.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
read_table.__doc__ = _read_table_docstring.format(
"""Read a Table from Parquet format
Note: starting with pyarrow 1.0, the default for `use_legacy_dataset` is
switched to False.""",
"\n".join((_read_docstring_common,
"""use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.""")),
"""pyarrow.Table
Content of the file as a table (of columns)""",
_DNF_filter_doc)
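# Editor's note: a minimal usage sketch of `read_table` (an editorial addition,
# not part of the original module). The file name and column names below are
# hypothetical placeholders; the filter uses the tuple-based DNF syntax
# documented above and relies on the dataset-based (non-legacy) code path.
def _example_read_table():
    table = read_table('example.parquet',
                       columns=['id', 'value'],
                       filters=[('value', '>', 0)])
    return table.to_pandas()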
def read_pandas(source, columns=None, **kwargs):
return read_table(
source, columns=columns, use_pandas_metadata=True, **kwargs
)
read_pandas.__doc__ = _read_table_docstring.format(
'Read a Table from Parquet format, also reading DataFrame\n'
'index values if known in the file metadata',
"\n".join((_read_docstring_common,
"""**kwargs
additional options for :func:`read_table`""")),
"""pyarrow.Table
Content of the file as a Table of Columns, including DataFrame
indexes as columns""",
_DNF_filter_doc)
def write_table(table, where, row_group_size=None, version='1.0',
use_dictionary=True, compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
coerce_timestamps=None,
allow_truncated_timestamps=False,
data_page_size=None, flavor=None,
filesystem=None,
compression_level=None,
use_byte_stream_split=False,
column_encoding=None,
data_page_version='1.0',
use_compliant_nested_type=False,
**kwargs):
row_group_size = kwargs.pop('chunk_size', row_group_size)
use_int96 = use_deprecated_int96_timestamps
try:
with ParquetWriter(
where, table.schema,
filesystem=filesystem,
version=version,
flavor=flavor,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
coerce_timestamps=coerce_timestamps,
data_page_size=data_page_size,
allow_truncated_timestamps=allow_truncated_timestamps,
compression=compression,
use_deprecated_int96_timestamps=use_int96,
compression_level=compression_level,
use_byte_stream_split=use_byte_stream_split,
column_encoding=column_encoding,
data_page_version=data_page_version,
use_compliant_nested_type=use_compliant_nested_type,
**kwargs) as writer:
writer.write_table(table, row_group_size=row_group_size)
except Exception:
if _is_path_like(where):
try:
os.remove(_stringify_path(where))
except os.error:
pass
raise
write_table.__doc__ = """
Write a Table to Parquet format.
Parameters
----------
table : pyarrow.Table
where : string or pyarrow.NativeFile
row_group_size : int
Maximum size of each written row group. If None, the
row group size will be the minimum of the Table size
and 64 * 1024 * 1024.
{}
**kwargs : optional
Additional options for ParquetWriter
""".format(_parquet_writer_arg_docs)
def _mkdir_if_not_exists(fs, path):
if fs._isfilestore() and not fs.exists(path):
try:
fs.mkdir(path)
except OSError:
assert fs.exists(path)
def write_to_dataset(table, root_path, partition_cols=None,
partition_filename_cb=None, filesystem=None,
use_legacy_dataset=None, **kwargs):
"""Wrapper around parquet.write_table for writing a Table to
Parquet format by partitions.
For each combination of partition columns and values,
    subdirectories are created in the following
manner:
root_dir/
group1=value1
group2=value1
<uuid>.parquet
group2=value2
<uuid>.parquet
group1=valueN
group2=value1
<uuid>.parquet
group2=valueN
<uuid>.parquet
Parameters
----------
table : pyarrow.Table
root_path : str, pathlib.Path
The root directory of the dataset
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
partition_cols : list,
Column names by which to partition the dataset
Columns are partitioned in the order they are given
partition_filename_cb : callable,
A callback function that takes the partition key(s) as an argument
        and allows you to override the partition filename. If nothing is
passed, the filename will consist of a uuid.
use_legacy_dataset : bool
Default is True unless a ``pyarrow.fs`` filesystem is passed.
Set to False to enable the new code path (experimental, using the
new Arrow Dataset API). This is more efficient when using partition
columns, but does not (yet) support `partition_filename_cb` and
`metadata_collector` keywords.
**kwargs : dict,
Additional kwargs for write_table function. See docstring for
`write_table` or `ParquetWriter` for more information.
Using `metadata_collector` in kwargs allows one to collect the
file metadata instances of dataset pieces. The file paths in the
ColumnChunkMetaData will be set relative to `root_path`.
"""
if use_legacy_dataset is None:
# if a new filesystem is passed -> default to new implementation
if isinstance(filesystem, FileSystem):
use_legacy_dataset = False
# otherwise the default is still True
else:
use_legacy_dataset = True
if not use_legacy_dataset:
import pyarrow.dataset as ds
# extract non-file format options
schema = kwargs.pop("schema", None)
use_threads = kwargs.pop("use_threads", True)
# raise for unsupported keywords
msg = (
"The '{}' argument is not supported with the new dataset "
"implementation."
)
metadata_collector = kwargs.pop('metadata_collector', None)
file_visitor = None
if metadata_collector is not None:
def file_visitor(written_file):
metadata_collector.append(written_file.metadata)
if partition_filename_cb is not None:
raise ValueError(msg.format("partition_filename_cb"))
# map format arguments
parquet_format = ds.ParquetFileFormat()
write_options = parquet_format.make_write_options(**kwargs)
# map old filesystems to new one
if filesystem is not None:
filesystem = _ensure_filesystem(filesystem)
partitioning = None
if partition_cols:
part_schema = table.select(partition_cols).schema
partitioning = ds.partitioning(part_schema, flavor="hive")
ds.write_dataset(
table, root_path, filesystem=filesystem,
format=parquet_format, file_options=write_options, schema=schema,
partitioning=partitioning, use_threads=use_threads,
file_visitor=file_visitor)
return
fs, root_path = legacyfs.resolve_filesystem_and_path(root_path, filesystem)
_mkdir_if_not_exists(fs, root_path)
metadata_collector = kwargs.pop('metadata_collector', None)
if partition_cols is not None and len(partition_cols) > 0:
df = table.to_pandas()
partition_keys = [df[col] for col in partition_cols]
data_df = df.drop(partition_cols, axis='columns')
data_cols = df.columns.drop(partition_cols)
if len(data_cols) == 0:
raise ValueError('No data left to save outside partition columns')
subschema = table.schema
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
for col in table.schema.names:
if col in partition_cols:
subschema = subschema.remove(subschema.get_field_index(col))
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = '/'.join(
['{colname}={value}'.format(colname=name, value=val)
for name, val in zip(partition_cols, keys)])
subtable = pa.Table.from_pandas(subgroup, schema=subschema,
safe=False)
_mkdir_if_not_exists(fs, '/'.join([root_path, subdir]))
if partition_filename_cb:
outfile = partition_filename_cb(keys)
else:
outfile = guid() + '.parquet'
relative_path = '/'.join([subdir, outfile])
full_path = '/'.join([root_path, relative_path])
with fs.open(full_path, 'wb') as f:
write_table(subtable, f, metadata_collector=metadata_collector,
**kwargs)
if metadata_collector is not None:
metadata_collector[-1].set_file_path(relative_path)
else:
if partition_filename_cb:
outfile = partition_filename_cb(None)
else:
outfile = guid() + '.parquet'
full_path = '/'.join([root_path, outfile])
with fs.open(full_path, 'wb') as f:
write_table(table, f, metadata_collector=metadata_collector,
**kwargs)
if metadata_collector is not None:
metadata_collector[-1].set_file_path(outfile)
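# Editor's note: a hedged sketch of writing a hive-partitioned dataset with
# `write_to_dataset` (an editorial addition, not part of the original module).
# The root path and the 'year' column are hypothetical placeholders.
def _example_write_to_dataset():
    table = pa.table({'year': [2020, 2020, 2021], 'value': [1.0, 2.0, 3.0]})
    # Produces example_dataset/year=2020/<uuid>.parquet and
    # example_dataset/year=2021/<uuid>.parquet.
    write_to_dataset(table, root_path='example_dataset', partition_cols=['year'])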
def write_metadata(schema, where, metadata_collector=None, **kwargs):
"""
Write metadata-only Parquet file from schema. This can be used with
`write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
files.
Parameters
----------
schema : pyarrow.Schema
where : string or pyarrow.NativeFile
metadata_collector : list
where to collect metadata information.
**kwargs : dict,
Additional kwargs for ParquetWriter class. See docstring for
`ParquetWriter` for more information.
Examples
--------
Write a dataset and collect metadata information.
>>> metadata_collector = []
>>> write_to_dataset(
... table, root_path,
... metadata_collector=metadata_collector, **writer_kwargs)
Write the `_common_metadata` parquet file without row groups statistics.
>>> write_metadata(
... table.schema, root_path / '_common_metadata', **writer_kwargs)
Write the `_metadata` parquet file with row groups statistics.
>>> write_metadata(
... table.schema, root_path / '_metadata',
... metadata_collector=metadata_collector, **writer_kwargs)
"""
writer = ParquetWriter(where, schema, **kwargs)
writer.close()
if metadata_collector is not None:
# ParquetWriter doesn't expose the metadata until it's written. Write
# it and read it again.
metadata = read_metadata(where)
for m in metadata_collector:
metadata.append_row_groups(m)
metadata.write_metadata_file(where)
def read_metadata(where, memory_map=False):
"""
Read FileMetadata from footer of a single Parquet file.
Parameters
----------
where : str (file path) or file-like object
memory_map : bool, default False
Create memory map when the source is a file path.
Returns
-------
metadata : FileMetadata
"""
return ParquetFile(where, memory_map=memory_map).metadata
def read_schema(where, memory_map=False):
"""
Read effective Arrow schema from Parquet file metadata.
Parameters
----------
where : str (file path) or file-like object
memory_map : bool, default False
Create memory map when the source is a file path.
Returns
-------
schema : pyarrow.Schema
"""
return ParquetFile(where, memory_map=memory_map).schema.to_arrow_schema()
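# Editor's note: a brief sketch of inspecting a Parquet file without reading the
# data (an editorial addition, not part of the original module). The file name
# is a hypothetical placeholder.
def _example_inspect_file():
    meta = read_metadata('example.parquet')    # FileMetaData (row groups, sizes)
    schema = read_schema('example.parquet')    # Arrow schema of the columns
    return meta.num_rows, schema.names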
|
icexelloss/arrow
|
python/pyarrow/parquet.py
|
Python
|
apache-2.0
| 89,131
|
[
"VisIt"
] |
7b9a21d63e96577d50977beaabd4bbbabac2f0f586171b295fcfd59aafc5fba5
|
from datetime import datetime, timedelta
from typing import List
import warnings
from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa
import numpy as np
from pandas.errors import PerformanceWarning
from pandas import DateOffset, Series, Timestamp, date_range
from pandas.tseries.offsets import Day, Easter
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
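# Editor's note: a small worked example of the observance helpers above (an
# editorial addition, not part of the original module). 2021-07-04 falls on a
# Sunday, so the nearest workday is the following Monday and the previous
# Friday is 2021-07-02.
def _example_observance_helpers():
    d = datetime(2021, 7, 4)                        # a Sunday
    assert nearest_workday(d) == datetime(2021, 7, 5)
    assert previous_friday(d) == datetime(2021, 7, 2)
    assert sunday_to_monday(d) == datetime(2021, 7, 5)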
class Holiday:
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(
self,
name,
year=None,
month=None,
day=None,
offset=None,
observance=None,
start_date=None,
end_date=None,
days_of_week=None,
):
"""
Parameters
----------
name : str
            Name of the holiday, defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
days_of_week:
            provide a tuple of days, e.g. (0, 1, 2, 3) for Monday through Thursday
Monday=0,..,Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('Memorial Day', month=5, day=31,
offset=pd.DateOffset(weekday=MO(-1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=pd.DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
                               observance=nearest_workday)
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = (
Timestamp(start_date) if start_date is not None else start_date
)
self.end_date = Timestamp(end_date) if end_date is not None else end_date
self.observance = observance
assert days_of_week is None or type(days_of_week) == tuple
self.days_of_week = days_of_week
def __repr__(self):
info = ""
if self.year is not None:
info += "year={year}, ".format(year=self.year)
info += "month={mon}, day={day}, ".format(mon=self.month, day=self.day)
if self.offset is not None:
info += "offset={offset}".format(offset=self.offset)
if self.observance is not None:
info += "observance={obs}".format(obs=self.observance)
repr = "Holiday: {name} ({info})".format(name=self.name, info=info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[
np.in1d(holiday_dates.dayofweek, self.days_of_week)
]
if self.start_date is not None:
filter_start_date = max(
self.start_date.tz_localize(filter_start_date.tz), filter_start_date
)
if self.end_date is not None:
filter_end_date = min(
self.end_date.tz_localize(filter_end_date.tz), filter_end_date
)
holiday_dates = holiday_dates[
(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)
]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day)
)
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day)
)
# Don't process unnecessary holidays
dates = date_range(
start=reference_start_date,
end=reference_end_date,
freq=year_offset,
tz=start_date.tz,
)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
dates += offset
return dates
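# Editor's note: a hedged usage sketch of the Holiday class defined above (an
# editorial addition, not part of the original module). It lists observed US
# Memorial Day dates, which fall on the last Monday of May.
def _example_holiday_dates():
    memorial_day = Holiday(
        'Memorial Day', month=5, day=31, offset=DateOffset(weekday=MO(-1)))
    # Returns a DatetimeIndex of the observed dates in the requested window.
    return memorial_day.dates(datetime(2019, 1, 1), datetime(2021, 12, 31))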
holiday_calendars = {}
def register(cls):
try:
name = cls.name
except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super().__new__(cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass):
"""
Abstract interface to create holidays following certain rules.
"""
rules = [] # type: List[Holiday]
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super().__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
        Returns a DatetimeIndex of holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception(
"Holiday Calendar {name} does not have any "
"rules specified".format(name=self.name)
)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = {holiday.name: holiday for holiday in other}
try:
base = base.rules
except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = {holiday.name: holiday for holiday in base}
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
            If True, set the calendar's rules to the merged holidays;
            otherwise return the array of Holidays.
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday(
"Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1))
)
USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday(
"Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2))
)
USThanksgivingDay = Holiday(
"Thanksgiving", month=11, day=1, offset=DateOffset(weekday=TH(4))
)
USMartinLutherKingJr = Holiday(
"Martin Luther King Jr. Day",
start_date=datetime(1986, 1, 1),
month=1,
day=1,
offset=DateOffset(weekday=MO(3)),
)
USPresidentsDay = Holiday(
"Presidents Day", month=2, day=1, offset=DateOffset(weekday=MO(3))
)
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday("New Years Day", month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday("July 4th", month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday("Veterans Day", month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday("Christmas", month=12, day=25, observance=nearest_workday),
]
def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
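# Editor's note: a hedged sketch combining the calendar utilities above (an
# editorial addition, not part of the original module). It queries the federal
# calendar and builds a merged calendar class via HolidayCalendarFactory.
def _example_calendars():
    cal = USFederalHolidayCalendar()
    federal_2020 = cal.holidays(datetime(2020, 1, 1), datetime(2020, 12, 31))
    # Merge the federal rules with Good Friday into a new calendar class.
    ExampleCalendar = HolidayCalendarFactory(
        'ExampleCalendar', USFederalHolidayCalendar, GoodFriday)
    return federal_2020, ExampleCalendar().holidays()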
|
toobaz/pandas
|
pandas/tseries/holiday.py
|
Python
|
bsd-3-clause
| 16,121
|
[
"COLUMBUS"
] |
8d1515f2947f4c2a9179380afc986a43f9d8ddd534ec6bda41199434dda47463
|
# Copyright (c) OpenMMLab. All rights reserved.
from logging import warning
from math import ceil, log
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms
from mmcv.runner import BaseModule
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from ..utils.gaussian_target import (gather_feat, get_local_maximum,
get_topk_from_heatmap,
transpose_and_gather_feat)
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
class BiCornerPool(BaseModule):
"""Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
Args:
in_channels (int): Input channels of module.
out_channels (int): Output channels of module.
feat_channels (int): Feature channels of module.
directions (list[str]): Directions of two CornerPools.
norm_cfg (dict): Dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
directions,
feat_channels=128,
out_channels=128,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=None):
super(BiCornerPool, self).__init__(init_cfg)
self.direction1_conv = ConvModule(
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
self.direction2_conv = ConvModule(
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
self.aftpool_conv = ConvModule(
feat_channels,
out_channels,
3,
padding=1,
norm_cfg=norm_cfg,
act_cfg=None)
self.conv1 = ConvModule(
in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.conv2 = ConvModule(
in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
self.direction1_pool = CornerPool(directions[0])
self.direction2_pool = CornerPool(directions[1])
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""Forward features from the upstream network.
Args:
x (tensor): Input feature of BiCornerPool.
Returns:
conv2 (tensor): Output feature of BiCornerPool.
"""
direction1_conv = self.direction1_conv(x)
direction2_conv = self.direction2_conv(x)
direction1_feat = self.direction1_pool(direction1_conv)
direction2_feat = self.direction2_pool(direction2_conv)
aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
conv1 = self.conv1(x)
relu = self.relu(aftpool_conv + conv1)
conv2 = self.conv2(relu)
return conv2
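# Editor's note: a hedged usage sketch of BiCornerPool (an editorial addition,
# not part of the original module). It assumes mmcv is built with the
# CornerPool op available; the feature map size is an arbitrary placeholder.
def _example_bi_corner_pool():
    pool = BiCornerPool(in_channels=256, directions=['top', 'left'],
                        out_channels=256)
    feat = torch.randn(1, 256, 32, 32)
    # The output keeps the spatial size and has `out_channels` channels.
    return pool(feat).shape  # torch.Size([1, 256, 32, 32])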
@HEADS.register_module()
class CornerHead(BaseDenseHead, BBoxTestMixin):
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
Code is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
kp.py#L73>`_ .
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_ .
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_feat_levels (int): Levels of feature from the previous module. 2
for HourglassNet-104 and 1 for HourglassNet-52. Because
HourglassNet-104 outputs the final feature and intermediate
supervision feature and HourglassNet-52 only outputs the final
feature. Default: 2.
corner_emb_channels (int): Channel of embedding vector. Default: 1.
train_cfg (dict | None): Training config. Useless in CornerHead,
but we keep this variable for SingleStageDetector. Default: None.
test_cfg (dict | None): Testing config of CornerHead. Default: None.
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
GaussianFocalLoss.
loss_embedding (dict | None): Config of corner embedding loss. Default:
AssociativeEmbeddingLoss.
loss_offset (dict | None): Config of corner offset loss. Default:
SmoothL1Loss.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_classes,
in_channels,
num_feat_levels=2,
corner_emb_channels=1,
train_cfg=None,
test_cfg=None,
loss_heatmap=dict(
type='GaussianFocalLoss',
alpha=2.0,
gamma=4.0,
loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.25,
push_weight=0.25),
loss_offset=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(CornerHead, self).__init__(init_cfg)
self.num_classes = num_classes
self.in_channels = in_channels
self.corner_emb_channels = corner_emb_channels
self.with_corner_emb = self.corner_emb_channels > 0
self.corner_offset_channels = 2
self.num_feat_levels = num_feat_levels
self.loss_heatmap = build_loss(
loss_heatmap) if loss_heatmap is not None else None
self.loss_embedding = build_loss(
loss_embedding) if loss_embedding is not None else None
self.loss_offset = build_loss(
loss_offset) if loss_offset is not None else None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self._init_layers()
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
"""Initialize conv sequential for CornerHead."""
return nn.Sequential(
ConvModule(in_channels, feat_channels, 3, padding=1),
ConvModule(
feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))
def _init_corner_kpt_layers(self):
"""Initialize corner keypoint layers.
Including corner heatmap branch and corner offset branch. Each branch
has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
"""
self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_pool.append(
BiCornerPool(
self.in_channels, ['top', 'left'],
out_channels=self.in_channels))
self.br_pool.append(
BiCornerPool(
self.in_channels, ['bottom', 'right'],
out_channels=self.in_channels))
self.tl_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.br_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.tl_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
self.br_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
def _init_corner_emb_layers(self):
"""Initialize corner embedding layers.
Only include corner embedding branch with two parts: prefix `tl_` for
top-left and `br_` for bottom-right.
"""
self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
self.br_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
def _init_layers(self):
"""Initialize layers for CornerHead.
Including two parts: corner keypoint layers and corner embedding layers
"""
self._init_corner_kpt_layers()
if self.with_corner_emb:
self._init_corner_emb_layers()
def init_weights(self):
super(CornerHead, self).init_weights()
bias_init = bias_init_with_prob(0.1)
for i in range(self.num_feat_levels):
# The initialization of parameters are different between
# nn.Conv2d and ConvModule. Our experiments show that
# using the original initialization of nn.Conv2d increases
# the final mAP by about 0.2%
self.tl_heat[i][-1].conv.reset_parameters()
self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
self.br_heat[i][-1].conv.reset_parameters()
self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
self.tl_off[i][-1].conv.reset_parameters()
self.br_off[i][-1].conv.reset_parameters()
if self.with_corner_emb:
self.tl_emb[i][-1].conv.reset_parameters()
self.br_emb[i][-1].conv.reset_parameters()
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of corner heatmaps, offset heatmaps and
embedding heatmaps.
- tl_heats (list[Tensor]): Top-left corner heatmaps for all
levels, each is a 4D-tensor, the channels number is
num_classes.
- br_heats (list[Tensor]): Bottom-right corner heatmaps for all
levels, each is a 4D-tensor, the channels number is
num_classes.
- tl_embs (list[Tensor] | list[None]): Top-left embedding
heatmaps for all levels, each is a 4D-tensor or None.
If not None, the channels number is corner_emb_channels.
- br_embs (list[Tensor] | list[None]): Bottom-right embedding
heatmaps for all levels, each is a 4D-tensor or None.
If not None, the channels number is corner_emb_channels.
- tl_offs (list[Tensor]): Top-left offset heatmaps for all
levels, each is a 4D-tensor. The channels number is
corner_offset_channels.
- br_offs (list[Tensor]): Bottom-right offset heatmaps for all
levels, each is a 4D-tensor. The channels number is
corner_offset_channels.
"""
lvl_ind = list(range(self.num_feat_levels))
return multi_apply(self.forward_single, feats, lvl_ind)
def forward_single(self, x, lvl_ind, return_pool=False):
"""Forward feature of a single level.
Args:
x (Tensor): Feature of a single level.
lvl_ind (int): Level index of current feature.
return_pool (bool): Return corner pool feature or not.
Returns:
tuple[Tensor]: A tuple of CornerHead's output for current feature
level. Containing the following Tensors:
- tl_heat (Tensor): Predicted top-left corner heatmap.
- br_heat (Tensor): Predicted bottom-right corner heatmap.
- tl_emb (Tensor | None): Predicted top-left embedding heatmap.
None for `self.with_corner_emb == False`.
- br_emb (Tensor | None): Predicted bottom-right embedding
heatmap. None for `self.with_corner_emb == False`.
- tl_off (Tensor): Predicted top-left offset heatmap.
- br_off (Tensor): Predicted bottom-right offset heatmap.
                - tl_pool (Tensor): Top-left corner pool feature. Only
                  returned when ``return_pool`` is True.
                - br_pool (Tensor): Bottom-right corner pool feature. Only
                  returned when ``return_pool`` is True.
"""
tl_pool = self.tl_pool[lvl_ind](x)
tl_heat = self.tl_heat[lvl_ind](tl_pool)
br_pool = self.br_pool[lvl_ind](x)
br_heat = self.br_heat[lvl_ind](br_pool)
tl_emb, br_emb = None, None
if self.with_corner_emb:
tl_emb = self.tl_emb[lvl_ind](tl_pool)
br_emb = self.br_emb[lvl_ind](br_pool)
tl_off = self.tl_off[lvl_ind](tl_pool)
br_off = self.br_off[lvl_ind](br_pool)
result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
if return_pool:
result_list.append(tl_pool)
result_list.append(br_pool)
return result_list
def get_targets(self,
gt_bboxes,
gt_labels,
feat_shape,
img_shape,
with_corner_emb=False,
with_guiding_shift=False,
with_centripetal_shift=False):
"""Generate corner targets.
Including corner heatmap, corner offset.
Optional: corner embedding, corner guiding shift, centripetal shift.
For CornerNet, we generate corner heatmap, corner offset and corner
embedding from this function.
For CentripetalNet, we generate corner heatmap, corner offset, guiding
shift and centripetal shift from this function.
Args:
gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
has shape (num_gt, 4).
gt_labels (list[Tensor]): Ground truth labels of each box, each has
shape (num_gt,).
feat_shape (list[int]): Shape of output feature,
[batch, channel, height, width].
img_shape (list[int]): Shape of input image,
[height, width, channel].
with_corner_emb (bool): Generate corner embedding target or not.
Default: False.
with_guiding_shift (bool): Generate guiding shift target or not.
Default: False.
with_centripetal_shift (bool): Generate centripetal shift target or
not. Default: False.
Returns:
dict: Ground truth of corner heatmap, corner offset, corner
embedding, guiding shift and centripetal shift. Containing the
following keys:
- topleft_heatmap (Tensor): Ground truth top-left corner
heatmap.
- bottomright_heatmap (Tensor): Ground truth bottom-right
corner heatmap.
- topleft_offset (Tensor): Ground truth top-left corner offset.
- bottomright_offset (Tensor): Ground truth bottom-right corner
offset.
                - corner_embedding (list[list[list[int]]]): Ground truth corner
                  embedding. Only present when `with_corner_emb` is True.
                - topleft_guiding_shift (Tensor): Ground truth top-left corner
                  guiding shift. Only present when `with_guiding_shift` is True.
                - bottomright_guiding_shift (Tensor): Ground truth bottom-right
                  corner guiding shift. Only present when `with_guiding_shift`
                  is True.
                - topleft_centripetal_shift (Tensor): Ground truth top-left
                  corner centripetal shift. Only present when
                  `with_centripetal_shift` is True.
                - bottomright_centripetal_shift (Tensor): Ground truth
                  bottom-right corner centripetal shift. Only present when
                  `with_centripetal_shift` is True.
"""
batch_size, _, height, width = feat_shape
img_h, img_w = img_shape[:2]
width_ratio = float(width / img_w)
height_ratio = float(height / img_h)
gt_tl_heatmap = gt_bboxes[-1].new_zeros(
[batch_size, self.num_classes, height, width])
gt_br_heatmap = gt_bboxes[-1].new_zeros(
[batch_size, self.num_classes, height, width])
gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
if with_corner_emb:
match = []
# Guiding shift is a kind of offset, from center to corner
if with_guiding_shift:
gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
# Centripetal shift is also a kind of offset, from center to corner
# and normalized by log.
if with_centripetal_shift:
gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
for batch_id in range(batch_size):
# Ground truth of corner embedding per image is a list of coord set
corner_match = []
for box_id in range(len(gt_labels[batch_id])):
left, top, right, bottom = gt_bboxes[batch_id][box_id]
center_x = (left + right) / 2.0
center_y = (top + bottom) / 2.0
label = gt_labels[batch_id][box_id]
# Use coords in the feature level to generate ground truth
scale_left = left * width_ratio
scale_right = right * width_ratio
scale_top = top * height_ratio
scale_bottom = bottom * height_ratio
scale_center_x = center_x * width_ratio
scale_center_y = center_y * height_ratio
# Int coords on feature map/ground truth tensor
left_idx = int(min(scale_left, width - 1))
right_idx = int(min(scale_right, width - 1))
top_idx = int(min(scale_top, height - 1))
bottom_idx = int(min(scale_bottom, height - 1))
# Generate gaussian heatmap
scale_box_width = ceil(scale_right - scale_left)
scale_box_height = ceil(scale_bottom - scale_top)
radius = gaussian_radius((scale_box_height, scale_box_width),
min_overlap=0.3)
radius = max(0, int(radius))
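                # Editor's note: gaussian_radius returns the largest radius for
                # which corners placed anywhere inside it still produce a box
                # with IoU >= min_overlap w.r.t. the ground truth, so nearby
                # pixels receive soft Gaussian supervision rather than a hard
                # 0/1 label.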
gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
radius)
gt_br_heatmap[batch_id, label] = gen_gaussian_target(
gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
radius)
# Generate corner offset
left_offset = scale_left - left_idx
top_offset = scale_top - top_idx
right_offset = scale_right - right_idx
bottom_offset = scale_bottom - bottom_idx
gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
gt_br_offset[batch_id, 1, bottom_idx,
right_idx] = bottom_offset
# Generate corner embedding
if with_corner_emb:
corner_match.append([[top_idx, left_idx],
[bottom_idx, right_idx]])
# Generate guiding shift
if with_guiding_shift:
gt_tl_guiding_shift[batch_id, 0, top_idx,
left_idx] = scale_center_x - left_idx
gt_tl_guiding_shift[batch_id, 1, top_idx,
left_idx] = scale_center_y - top_idx
gt_br_guiding_shift[batch_id, 0, bottom_idx,
right_idx] = right_idx - scale_center_x
gt_br_guiding_shift[
batch_id, 1, bottom_idx,
right_idx] = bottom_idx - scale_center_y
# Generate centripetal shift
if with_centripetal_shift:
gt_tl_centripetal_shift[batch_id, 0, top_idx,
left_idx] = log(scale_center_x -
scale_left)
gt_tl_centripetal_shift[batch_id, 1, top_idx,
left_idx] = log(scale_center_y -
scale_top)
gt_br_centripetal_shift[batch_id, 0, bottom_idx,
right_idx] = log(scale_right -
scale_center_x)
gt_br_centripetal_shift[batch_id, 1, bottom_idx,
right_idx] = log(scale_bottom -
scale_center_y)
if with_corner_emb:
match.append(corner_match)
target_result = dict(
topleft_heatmap=gt_tl_heatmap,
topleft_offset=gt_tl_offset,
bottomright_heatmap=gt_br_heatmap,
bottomright_offset=gt_br_offset)
if with_corner_emb:
target_result.update(corner_embedding=match)
if with_guiding_shift:
target_result.update(
topleft_guiding_shift=gt_tl_guiding_shift,
bottomright_guiding_shift=gt_br_guiding_shift)
if with_centripetal_shift:
target_result.update(
topleft_centripetal_shift=gt_tl_centripetal_shift,
bottomright_centripetal_shift=gt_br_centripetal_shift)
return target_result
def loss(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [left, top, right, bottom] format.
gt_labels (list[Tensor]): Class indices corresponding to each box.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components. Containing the
following losses:
- det_loss (list[Tensor]): Corner keypoint losses of all
feature levels.
- pull_loss (list[Tensor]): Part one of AssociativeEmbedding
losses of all feature levels.
- push_loss (list[Tensor]): Part two of AssociativeEmbedding
losses of all feature levels.
- off_loss (list[Tensor]): Corner offset losses of all feature
levels.
"""
targets = self.get_targets(
gt_bboxes,
gt_labels,
tl_heats[-1].shape,
img_metas[0]['pad_shape'],
with_corner_emb=self.with_corner_emb)
mlvl_targets = [targets for _ in range(self.num_feat_levels)]
det_losses, pull_losses, push_losses, off_losses = multi_apply(
self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, mlvl_targets)
loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
if self.with_corner_emb:
loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
return loss_dict
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
targets):
"""Compute losses for single level.
Args:
tl_hmp (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_hmp (Tensor): Bottom-right corner heatmap for current level with
shape (N, num_classes, H, W).
tl_emb (Tensor): Top-left corner embedding for current level with
shape (N, corner_emb_channels, H, W).
br_emb (Tensor): Bottom-right corner embedding for current level
with shape (N, corner_emb_channels, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
targets (dict): Corner target generated by `get_targets`.
Returns:
tuple[torch.Tensor]: Losses of the head's different branches
containing the following losses:
- det_loss (Tensor): Corner keypoint loss.
- pull_loss (Tensor): Part one of AssociativeEmbedding loss.
- push_loss (Tensor): Part two of AssociativeEmbedding loss.
- off_loss (Tensor): Corner offset loss.
"""
gt_tl_hmp = targets['topleft_heatmap']
gt_br_hmp = targets['bottomright_heatmap']
gt_tl_off = targets['topleft_offset']
gt_br_off = targets['bottomright_offset']
gt_embedding = targets['corner_embedding']
# Detection loss
tl_det_loss = self.loss_heatmap(
tl_hmp.sigmoid(),
gt_tl_hmp,
avg_factor=max(1,
gt_tl_hmp.eq(1).sum()))
br_det_loss = self.loss_heatmap(
br_hmp.sigmoid(),
gt_br_hmp,
avg_factor=max(1,
gt_br_hmp.eq(1).sum()))
det_loss = (tl_det_loss + br_det_loss) / 2.0
# AssociativeEmbedding loss
if self.with_corner_emb and self.loss_embedding is not None:
pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
gt_embedding)
else:
pull_loss, push_loss = None, None
# Offset loss
# We only compute the offset loss at the real corner position.
# The value of real corner would be 1 in heatmap ground truth.
# The mask is computed in class agnostic mode and its shape is
# batch * 1 * width * height.
tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
gt_tl_hmp)
br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
gt_br_hmp)
tl_off_loss = self.loss_offset(
tl_off,
gt_tl_off,
tl_off_mask,
avg_factor=max(1, tl_off_mask.sum()))
br_off_loss = self.loss_offset(
br_off,
gt_br_off,
br_off_mask,
avg_factor=max(1, br_off_mask.sum()))
off_loss = (tl_off_loss + br_off_loss) / 2.0
return det_loss, pull_loss, push_loss, off_loss
def get_bboxes(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
img_metas,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
"""
assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
result_list = []
for img_id in range(len(img_metas)):
result_list.append(
self._get_bboxes_single(
tl_heats[-1][img_id:img_id + 1, :],
br_heats[-1][img_id:img_id + 1, :],
tl_offs[-1][img_id:img_id + 1, :],
br_offs[-1][img_id:img_id + 1, :],
img_metas[img_id],
tl_emb=tl_embs[-1][img_id:img_id + 1, :],
br_emb=br_embs[-1][img_id:img_id + 1, :],
rescale=rescale,
with_nms=with_nms))
return result_list
def _get_bboxes_single(self,
tl_heat,
br_heat,
tl_off,
br_off,
img_meta,
tl_emb=None,
br_emb=None,
tl_centripetal_shift=None,
br_centripetal_shift=None,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
tl_heat (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_heat (Tensor): Bottom-right corner heatmap for current level
with shape (N, num_classes, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
tl_emb (Tensor): Top-left corner embedding for current level with
shape (N, corner_emb_channels, H, W).
br_emb (Tensor): Bottom-right corner embedding for current level
with shape (N, corner_emb_channels, H, W).
tl_centripetal_shift: Top-left corner's centripetal shift for
current level with shape (N, 2, H, W).
br_centripetal_shift: Bottom-right corner's centripetal shift for
current level with shape (N, 2, H, W).
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
"""
if isinstance(img_meta, (list, tuple)):
img_meta = img_meta[0]
batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
tl_heat=tl_heat.sigmoid(),
br_heat=br_heat.sigmoid(),
tl_off=tl_off,
br_off=br_off,
tl_emb=tl_emb,
br_emb=br_emb,
tl_centripetal_shift=tl_centripetal_shift,
br_centripetal_shift=br_centripetal_shift,
img_meta=img_meta,
k=self.test_cfg.corner_topk,
kernel=self.test_cfg.local_maximum_kernel,
distance_threshold=self.test_cfg.distance_threshold)
if rescale:
batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])
bboxes = batch_bboxes.view([-1, 4])
scores = batch_scores.view(-1)
clses = batch_clses.view(-1)
detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
keepinds = (detections[:, -1] > -0.1)
detections = detections[keepinds]
labels = clses[keepinds]
if with_nms:
detections, labels = self._bboxes_nms(detections, labels,
self.test_cfg)
return detections, labels
def _bboxes_nms(self, bboxes, labels, cfg):
if 'nms_cfg' in cfg:
            warning('nms_cfg in test_cfg will be deprecated. '
                    'Please rename it as nms')
if 'nms' not in cfg:
cfg.nms = cfg.nms_cfg
if labels.numel() > 0:
max_num = cfg.max_per_img
bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:,
-1].contiguous(),
labels, cfg.nms)
if max_num > 0:
bboxes = bboxes[:max_num]
labels = labels[keep][:max_num]
return bboxes, labels
def decode_heatmap(self,
tl_heat,
br_heat,
tl_off,
br_off,
tl_emb=None,
br_emb=None,
tl_centripetal_shift=None,
br_centripetal_shift=None,
img_meta=None,
k=100,
kernel=3,
distance_threshold=0.5,
num_dets=1000):
"""Transform outputs for a single batch item into raw bbox predictions.
Args:
tl_heat (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_heat (Tensor): Bottom-right corner heatmap for current level
with shape (N, num_classes, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
tl_emb (Tensor | None): Top-left corner embedding for current
level with shape (N, corner_emb_channels, H, W).
br_emb (Tensor | None): Bottom-right corner embedding for current
level with shape (N, corner_emb_channels, H, W).
tl_centripetal_shift (Tensor | None): Top-left centripetal shift
for current level with shape (N, 2, H, W).
br_centripetal_shift (Tensor | None): Bottom-right centripetal
shift for current level with shape (N, 2, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
k (int): Get top k corner keypoints from heatmap.
kernel (int): Max pooling kernel for extract local maximum pixels.
distance_threshold (float): Distance threshold. Top-left and
bottom-right corner keypoints with feature distance less than
the threshold will be regarded as keypoints from same object.
num_dets (int): Num of raw boxes before doing nms.
Returns:
tuple[torch.Tensor]: Decoded output of CornerHead, containing the
following Tensors:
- bboxes (Tensor): Coords of each box.
- scores (Tensor): Scores of each box.
- clses (Tensor): Categories of each box.
"""
with_embedding = tl_emb is not None and br_emb is not None
with_centripetal_shift = (
tl_centripetal_shift is not None
and br_centripetal_shift is not None)
assert with_embedding + with_centripetal_shift == 1
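        # Editor's note: exactly one grouping mode must be active. CornerNet
        # pairs corners via associative embeddings, while CentripetalNet pairs
        # them via centripetal shifts; the decoding below branches on this.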
batch, _, height, width = tl_heat.size()
if torch.onnx.is_in_onnx_export():
inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2]
else:
inp_h, inp_w, _ = img_meta['pad_shape']
# perform nms on heatmaps
tl_heat = get_local_maximum(tl_heat, kernel=kernel)
br_heat = get_local_maximum(br_heat, kernel=kernel)
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap(
tl_heat, k=k)
br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap(
br_heat, k=k)
        # We use repeat instead of expand here because expand is a
        # shallow-copy function and can sometimes cause unexpected testing
        # results. Using expand decreases mAP by about 10% during testing
        # compared to repeat.
tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)
tl_off = transpose_and_gather_feat(tl_off, tl_inds)
tl_off = tl_off.view(batch, k, 1, 2)
br_off = transpose_and_gather_feat(br_off, br_inds)
br_off = br_off.view(batch, 1, k, 2)
tl_xs = tl_xs + tl_off[..., 0]
tl_ys = tl_ys + tl_off[..., 1]
br_xs = br_xs + br_off[..., 0]
br_ys = br_ys + br_off[..., 1]
if with_centripetal_shift:
tl_centripetal_shift = transpose_and_gather_feat(
tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
br_centripetal_shift = transpose_and_gather_feat(
br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()
tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
br_ctxs = br_xs - br_centripetal_shift[..., 0]
br_ctys = br_ys - br_centripetal_shift[..., 1]
# all possible boxes based on top k corners (ignoring class)
tl_xs *= (inp_w / width)
tl_ys *= (inp_h / height)
br_xs *= (inp_w / width)
br_ys *= (inp_h / height)
if with_centripetal_shift:
tl_ctxs *= (inp_w / width)
tl_ctys *= (inp_h / height)
br_ctxs *= (inp_w / width)
br_ctys *= (inp_h / height)
x_off, y_off = 0, 0 # no crop
if not torch.onnx.is_in_onnx_export():
            # Since `RandomCenterCropPad` is done on CPU with numpy and is not
            # dynamically traceable when exporting to ONNX, 'border' does not
            # appear as a key in 'img_meta'. As a temporary solution, the
            # 'border' handling is moved to the post-processing step after
            # ONNX export, which is handled in
            # `mmdet/core/export/model_wrappers.py`. Although the PyTorch and
            # exported ONNX models differ slightly, the difference can be
            # ignored since they achieve comparable performance (e.g. 40.4 vs
            # 40.6 on COCO val2017 for CornerNet without test-time flip)
if 'border' in img_meta:
x_off = img_meta['border'][2]
y_off = img_meta['border'][0]
tl_xs -= x_off
tl_ys -= y_off
br_xs -= x_off
br_ys -= y_off
zeros = tl_xs.new_zeros(*tl_xs.size())
tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros)
tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros)
br_xs = torch.where(br_xs > 0.0, br_xs, zeros)
br_ys = torch.where(br_ys > 0.0, br_ys, zeros)
bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()
if with_centripetal_shift:
tl_ctxs -= x_off
tl_ctys -= y_off
br_ctxs -= x_off
br_ctys -= y_off
tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)
ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
dim=3)
area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()
rcentral = torch.zeros_like(ct_bboxes)
# magic nums from paper section 4.1
mu = torch.ones_like(area_bboxes) / 2.4
mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu
bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
bboxes[..., 0]) / 2
rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
bboxes[..., 1]) / 2
rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
bboxes[..., 0]) / 2
rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
bboxes[..., 1]) / 2
area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
(rcentral[..., 3] - rcentral[..., 1])).abs()
dists = area_ct_bboxes / area_rcentral
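# Descriptive note: the masks below flag corner pairs whose predicted
# centers (from the centripetal shifts) fall outside the central
# region `rcentral` of the candidate box; such pairs are rejected
# later by setting their scores to -1.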
tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
ct_bboxes[..., 0] >= rcentral[..., 2])
tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
ct_bboxes[..., 1] >= rcentral[..., 3])
br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
ct_bboxes[..., 2] >= rcentral[..., 2])
br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
ct_bboxes[..., 3] >= rcentral[..., 3])
if with_embedding:
tl_emb = transpose_and_gather_feat(tl_emb, tl_inds)
tl_emb = tl_emb.view(batch, k, 1)
br_emb = transpose_and_gather_feat(br_emb, br_inds)
br_emb = br_emb.view(batch, 1, k)
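# When embeddings are used, the grouping distance is the absolute
# difference between top-left and bottom-right corner embeddings
# (associative embedding). Note that this overwrites any `dists`
# computed from the centripetal shifts above.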
dists = torch.abs(tl_emb - br_emb)
tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)
scores = (tl_scores + br_scores) / 2 # scores for all possible boxes
# tl and br should have the same class
tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
cls_inds = (tl_clses != br_clses)
# reject boxes based on distances
dist_inds = dists > distance_threshold
# reject boxes based on widths and heights
width_inds = (br_xs <= tl_xs)
height_inds = (br_ys <= tl_ys)
# Do not use `scores[cls_inds]`; use `torch.where` instead. When
# exporting to ONNX, only 1-D indices of type 'tensor(bool)' are
# supported, so bool indices with more dimensions (e.g. a 2-D bool
# tensor) as an input parameter to a node are invalid.
negative_scores = -1 * torch.ones_like(scores)
scores = torch.where(cls_inds, negative_scores, scores)
scores = torch.where(width_inds, negative_scores, scores)
scores = torch.where(height_inds, negative_scores, scores)
scores = torch.where(dist_inds, negative_scores, scores)
if with_centripetal_shift:
scores[tl_ctx_inds] = -1
scores[tl_cty_inds] = -1
scores[br_ctx_inds] = -1
scores[br_cty_inds] = -1
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
bboxes = bboxes.view(batch, -1, 4)
bboxes = gather_feat(bboxes, inds)
clses = tl_clses.contiguous().view(batch, -1, 1)
clses = gather_feat(clses, inds).float()
return bboxes, scores, clses
def onnx_export(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
img_metas,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple[Tensor, Tensor]: The first tensor is bboxes with shape
[N, num_det, 5], where the 5 values are arranged as
(x1, y1, x2, y2, score); the second is class labels of shape
[N, num_det].
"""
assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(
img_metas) == 1
result_list = []
for img_id in range(len(img_metas)):
result_list.append(
self._get_bboxes_single(
tl_heats[-1][img_id:img_id + 1, :],
br_heats[-1][img_id:img_id + 1, :],
tl_offs[-1][img_id:img_id + 1, :],
br_offs[-1][img_id:img_id + 1, :],
img_metas[img_id],
tl_emb=tl_embs[-1][img_id:img_id + 1, :],
br_emb=br_embs[-1][img_id:img_id + 1, :],
rescale=rescale,
with_nms=with_nms))
detections, labels = result_list[0]
# batch_size 1 here, [1, num_det, 5], [1, num_det]
return detections.unsqueeze(0), labels.unsqueeze(0)
|
open-mmlab/mmdetection
|
mmdet/models/dense_heads/corner_head.py
|
Python
|
apache-2.0
| 48,420
|
[
"Gaussian"
] |
79289cf83fb22833ef15ad6e8e97f92276eb92153b3a533e5a2ce7cb0b45c6cf
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
import numpy as np
from sys import platform
import subprocess
from DataSounds.external.sebastian.lilypond.interp import parse
from DataSounds.external.sebastian.midi.write_midi import SMF
from DataSounds.external.sebastian.core.transforms import stretch
from DataSounds.external.sebastian.core import notes
def note_classes(arr, scale):
'''
Get note classes from data range.
Parameters
----------
arr : arr
array to be arranged as note classes.
scale : an `build_scale` object
Consists of a Tone scaled. (C maj, pentatonic C, C min, etc.)
Returns
-------
Parameterized values of musical notes based on input array.
'''
minr = np.nanmin(arr)
maxr = np.nanmax(arr)
_, bins = np.histogram(arr, bins=len(scale) - 1, range=(minr, maxr))
return bins
def note_number(arr, scale):
'''
Get the relative note numbers of an array, mapped onto a chosen scale.
Parameters
----------
arr : arr
array to be arranged as note classes.
scale : an `build_scale` object
Returns
-------
mapping : arr
Note number of input array parameterized with chosen scale.
Note numbers follow the Sebastian sequence; the name of any
number can be looked up with, e.g.,
sebastian.core.notes.name('2'), which returns the musical note "E".
'''
x_notes = note_classes(arr, scale)
mapping = np.searchsorted(x_notes, arr, side='left').astype('f8')
mapping[np.isnan(arr)] = np.nan
return mapping
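# Usage sketch (illustrative, assuming a 7-note scale built with
# `build_scale('C', 'major', 1)`):
#   note_number(np.array([0.0, 0.5, 1.0]), scale)
# maps the minimum value to the lowest bin of the scale and the
# maximum to the highest bin; np.nan values stay np.nan (rests).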
def note_on_classes(note, arr, scale):
if np.isnan(note):
return np.nan
x_notes = note_classes(arr, scale)
return np.searchsorted(x_notes, note, side='left').astype('f8')
def pentatonic_scale(tonic):
'''
Pentatonic scale, based on the Major Pentatonic.
Not implemented in Sebastian.
References
----------
http://en.wikipedia.org/wiki/Pentatonic_scale
'''
return [tonic + i for i in [0, 2, 4, 1, 3]]
def blues_scale(tonic):
'''
Blues scale
References
----------
http://en.wikipedia.org/wiki/Blues_scale
'''
return [tonic + i for i in [0, 2, 4, -1, 1, 3, 5]]
def build_scale(key, mode='major', octaves=1):
'''
Build a scale from a key note.
Parameters
----------
key : Musical key.
Can be set as a parameter while building the scale.
The key should be written as "C" for C, "C#" for C sharp and
"Cb" for C flat.
mode : Musical mode.
'major' and 'minor' and 'pentatonic' are acceptable parameters.
octaves : int
number of octaves to be evaluated.
Returns
-------
scale_notes : sebastian.core.elements
Sequence of scale notes.
'''
if mode == 'major':
scale = notes.major_scale
elif mode == 'minor':
scale = notes.minor_scale
elif mode == 'pentatonic':
scale = pentatonic_scale
elif mode == 'blues':
scale = blues_scale
scale_notes = [notes.name(s).lower() + ("'" * octave)
for octave in range(octaves)
for s in scale(notes.value(key))]
return scale_notes
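# Usage sketch (an assumption about Sebastian's note naming, not
# verified against the library):
#   build_scale('C', 'major', 1)  # -> something like
#   ['c', 'd', 'e', 'f', 'g', 'a', 'b']
# With octaves=2, each additional octave appends the same note names
# suffixed with "'" marks.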
def note_name(number, scale):
'''
Transform a number to a note string, including np.nan as
musical rests.
'''
if np.isnan(number):
return "r"
else:
return scale[int(number)].replace('#', 'is')
def chord_scaled(arr, scale, period=12):
'''
Scales an array of notes.
'''
remainder = arr.size % period
if remainder:
fill = period - remainder
arr = np.append(arr, np.zeros(fill) * np.nan)
arr_scaled = np.int32([np.nansum(row) / len(row)
for row in arr.reshape((-1, period))])
root_scaled = [note_on_classes(note, arr, scale) for note in arr_scaled]
root = []
third = []
fifth = []
for note in root_scaled:
root.append(note_name(note, scale))
third.append(note_name(note, scale))
fifth.append(note_name(note, scale))
seq1 = parse(" ".join(root))
seq2 = parse(" ".join(third))
seq3 = parse(" ".join(fifth))
# chords = (seq1 * period) // (seq2 * period) // (seq3 * period)
chords = seq1 // seq2 // seq3
# return (chords | add({DURATION_64: chords[0][DURATION_64] * period}))
return (chords | stretch(period))
# return chords
def get_music(series, key='C', mode='major', octaves=2,
instruments=None, period=12):
'''
Returns music generated from an inserted series.
Parameters
----------
series : an array that could be an 2d-array.
key : Musical key.
Can be set as a parameter while building the scale.
The key should be written as "C" for C, "C#" for C sharp and
"Cb" for C flat, or any other key note (e.g. D, E, F, G, A, B).
mode : Music mode.
'major', 'minor' and 'pentatonic' are acceptable parameters.
More mode options are described in `build_scale`.
octaves : Number of octaves, or a list of octaves (in case you
use more than one series and want a specific number of
octaves for each).
The higher the octaves, the larger the pitch differences used
to represent your data.
instruments : list of MIDI instruments.
General MIDI Level 1 Instrument Patch Map can be found at:
http://en.wikipedia.org/wiki/General_MIDI
Acoustic Grand Piano '[0]' is the default value if no
instruments are declared.
A few examples:
[0] Acoustic Grand Piano
[18] Rock Organ
[23] Tango Accordion
[32] Acoustic Bass
[73] Flute
Complete list:
+---------------------------------------------------+
|Piano |
+===================================================+
| 0 Acoustic Grand Piano | 1 Bright Acoustic Piano |
+-------------------------+-------------------------+
| 2 Electric Grand Piano | 3 Honky-tonk Piano |
+-------------------------+-------------------------+
| 4 Electric Piano 1 | 5 Electric Piano 2 |
+-------------------------+-------------------------+
| 6 Harpsichord | 7 Clavinet |
+-------------------------+-------------------------+
+---------------------------------------------------+
|Chromatic Percussion |
+===================================================+
| 8 Celesta | 9 Glockenspiel |
+-------------------------+-------------------------+
| 10 Music Box | 11 Vibraphone |
+-------------------------+-------------------------+
| 12 Marimba | 13 Xylophone |
+-------------------------+-------------------------+
| 14 Tubular Bells | 15 Dulcimer |
+-------------------------+-------------------------+
+---------------------------------------------------+
|Organ |
+===================================================+
| 16 Drawbar Organ | 17 Percussive Organ |
+-------------------------+-------------------------+
| 18 Rock Organ | 19 Church Organ |
+-------------------------+-------------------------+
| 20 Reed Organ | 21 Accordion |
+-------------------------+-------------------------+
| 22 Harmonica | 23 Tango Accordion |
+-------------------------+-------------------------+
+-----------------------------------------------------+
|Guitar |
+=====================================================+
| 24 Acoustic Guitar(nylon)| 25 Acoustic Guitar(steel)|
+--------------------------+--------------------------+
| 26 Electric Guitar(jazz) | 27 Electric Guitar(clean)|
+--------------------------+--------------------------+
| 28 Electric Guitar(muted)| 29 Overdriven Guitar |
+--------------------------+--------------------------+
| 30 Distortion Guitar | 31 Guitar Harmonics |
+--------------------------+--------------------------+
+-----------------------------------------------------+
|Bass |
+=====================================================+
| 32 Acoustic Bass         | 33 Electric Bass (finger)|
+--------------------------+--------------------------+
| 34 Electric Bass (pick) | 35 Fretless Bass |
+--------------------------+--------------------------+
| 36 Slap Bass 1 | 37 Slap Bass 2 |
+--------------------------+--------------------------+
| 38 Synth Bass 1 | 39 Synth Bass 2 |
+--------------------------+--------------------------+
+-----------------------------------------------------+
|Strings |
+=====================================================+
| 40 Violin | 41 Viola |
+--------------------------+--------------------------+
| 42 Cello | 43 Contrabass |
+--------------------------+--------------------------+
| 44 Tremolo String | 45 Pizzicato Strings |
+--------------------------+--------------------------+
| 46 Orchestral Harp | 47 Timpani |
+--------------------------+--------------------------+
+-----------------------------------------------------+
|Ensemble                                             |
+=====================================================+
| 48 String Ensemble 1 | 49 String Ensemble 2 |
+--------------------------+--------------------------+
| 50 Synth Strings 1 | 51 Synth Strings 2 |
+--------------------------+--------------------------+
| 52 Choir Aahs | 53 Voice Oohs |
+--------------------------+--------------------------+
| 54 Synth Choir | 55 Orchestra Hit |
+--------------------------+--------------------------+
+-----------------------------------------------------+
|Brass |
+=====================================================+
| 56 Trumpet | 57 Trombone |
+--------------------------+--------------------------+
| 58 Tuba | 59 Muted Trumpet |
+--------------------------+--------------------------+
| 60 French Horn | 61 Brass Section |
+--------------------------+--------------------------+
| 62 Synth Brass 1 | 63 Synth Brass 2 |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Reed |
+=====================================================+
| 64 Soprano Sax | 65 Alto Sax |
+--------------------------+--------------------------+
| 66 Tenor Sax | 67 Baritone Sax |
+--------------------------+--------------------------+
| 68 Oboe | 69 English Horn |
+--------------------------+--------------------------+
| 70 Bassoon | 71 Clarinet |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Pipe |
+=====================================================+
| 72 Piccolo | 73 Flute |
+--------------------------+--------------------------+
| 74 Recorder | 75 Pan Flute |
+--------------------------+--------------------------+
| 76 Blown bottle | 77 Shakuhachi |
+--------------------------+--------------------------+
| 78 Whistle | 79 Ocarina |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Synth Lead |
+=====================================================+
| 80 Lead 1 (square) | 81 Lead 2 (sawtooth) |
+--------------------------+--------------------------+
| 82 Lead 3 (calliope) | 83 Lead 4 chiff |
+--------------------------+--------------------------+
| 84 Lead 5 (charang) | 85 Lead 6 (voice) |
+--------------------------+--------------------------+
| 86 Lead 7 (fifths) | 87 Lead 8 (bass + lead) |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Synth Pad |
+=====================================================+
| 88 Pad 1 (new age) | 89 Pad 2 (warm) |
+--------------------------+--------------------------+
| 90 Pad 3 (polysynth) | 91 Pad 4 (choir) |
+--------------------------+--------------------------+
| 92 Pad 5 (bowed) | 93 Pad 6 (metallic) |
+--------------------------+--------------------------+
| 94 Pad 7 (halo) | 95 Pad 8 (sweep) |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Synth Effects |
+=====================================================+
| 96 FX 1 (rain) | 97 FX 2 (soundtrack) |
+--------------------------+--------------------------+
| 98 FX 3 (crystal) | 99 FX 4 (atmosphere) |
+--------------------------+--------------------------+
| 100 FX 5 (brightness) | 101 FX 6 (goblins) |
+--------------------------+--------------------------+
| 102 FX 7 (echoes) | 103 FX 8 (sci-fi) |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Ethnic |
+=====================================================+
| 104 Sitar | 105 Banjo |
+--------------------------+--------------------------+
| 106 Shamisen | 107 Koto |
+--------------------------+--------------------------+
| 108 Kalimba | 109 Bagpipe |
+--------------------------+--------------------------+
| 110 Fiddle | 111 Shanai |
+--------------------------+--------------------------+
+-----------------------------------------------------+
| Percussive |
+=====================================================+
| 112 Tinkle Bell | 113 Agogo |
+--------------------------+--------------------------+
| 114 Steel Drums | 115 Woodblock |
+--------------------------+--------------------------+
| 116 Taiko Drum | 117 Melodic Tom |
+--------------------------+--------------------------+
| 118 Synth Drum | 119 Reverse Cymbal |
+--------------------------+--------------------------+
+-----------------------------------------------------+
|Sound effects |
+=====================================================+
| 120 Guitar Fret Noise | 121 Breath Noise |
+--------------------------+--------------------------+
| 122 Seashore | 123 Bird Tweet |
+--------------------------+--------------------------+
| 124 Telephone Ring | 125 Helicopter |
+--------------------------+--------------------------+
| 126 Applause | 127 Gunshot |
+--------------------------+--------------------------+
period : int
Parameter passed to the `chord_scaled` function.
Returns
-------
midi_out : BytesIO object.
It can be written on a file or used by your way.
Example
-------
>>> data = np.random.random(10).reshape(2,5)
array([[ 0.13536875, 0.42212475, 0.26360219, 0.30153336,
0.62150923],
[ 0.49384405, 0.32503762, 0.85549822, 0.80212442,
0.70702405]])
>>> get_music(data, octaves=2, instruments=(0,23))
<io.BytesIO at 0x7f98201c9d40>
'''
midi_out = BytesIO()
series = np.array(series)
scales = []
melodies = []
if len(series.shape) == 1:
scale = build_scale(key, mode, octaves)
if all(np.isnan(series)):
melody = []
melodies.append(melody)
else:
snotes = note_number(series, scale)
melody = parse(' '.join([note_name(x, scale) for x in snotes]))
melodies.append(melody)
else:
for i in range(series.shape[0]):
if all(np.isnan(series[i])):
melody = []
melodies.append(melody)
else:
if isinstance(octaves, int):
scales.append(build_scale(key, mode, octaves))
else:
scales.append(build_scale(key, mode, octaves[i]))
snotes = note_number(series[i], scales[i])
melody = parse(' '.join([note_name(x, scales[i]) for x in snotes]))
melodies.append(melody)
# chords = chord_scaled(series, scale, period)
# Transform it to a MIDI file with chords.
# s = SMF([melody, chords], instruments=[0, 23])
if instruments is None:
s = SMF(melodies)
else:
s = SMF(melodies, instruments)
s.write(midi_out)
return midi_out
def w2Midi(name, BytesIo):
'''
Writes the output of `get_music` inside a '.midi' file on disk.
Parameters
----------
name : str
name of file
BytesIo : get_music output variable
variable of music generated with `get_music`
'''
muz_file = open(str(name)+'.midi', 'wb')
muz_file.write(BytesIo.getvalue())
muz_file.close()
def play(file):
"""Use system program to play MIDI files
We try to use timidity as the default software. Please see the `timidity
documentation <http://timidity.sourceforge.net/install.html>`_.
Parameters
----------
name : str
name of file
Example
-------
>>> file = "music.mid"
>>> play(file)
"""
# linux
if platform == "linux" or platform == "linux2":
if subprocess.call("timidity") == 0:
try:
subprocess.call(["timidity", str(file)])
except OSError:
print("You do not have appropriate software installed to "
"play MIDI files. See Timidity installation "
"http://timidity.sourceforge.net/install.html")
else:
try: subprocess.call(["totem", str(file)])
except OSError:
print("Maybe you do not have 'fluid-soundfont-gm' installed "
"to use it with totem.")
# MAC OS X
elif _platform == "darwin":
if subprocess.call("timidity") == 0:
try:
subprocess.call(["timidity", str(file)])
except OSError:
print("You do not have appropriate software installed to "
"play MIDI files. See Timidity installation "
"http://timidity.sourceforge.net/install.html")
else:
try:
subprocess.call(["open", str(file)])
except OSError:
print("Seems that your 'open' program cannot play MIDI files")
# Windows
elif _platform == "win32":
try:
subprocess.call(["timidity", str(file)])
except OSError:
print("You do not have appropriate software installed to "
"play MIDI files. See Timidity installation "
"http://timidity.sourceforge.net/install.html")
|
DataSounds/DataSounds
|
src/DataSounds/sounds.py
|
Python
|
bsd-3-clause
| 21,176
|
[
"CRYSTAL"
] |
484a61bfeddb3cfc613423eb3b46e91a10c294b93fcec6ccf6316413bb7d46c4
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.248444
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/getaudiotracks.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getaudiotracks(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getaudiotracks, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_70866031 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2audiotracklist>
''')
for track in VFFSL(SL,"tracklist",True): # generated from line 4, col 2
write(u'''\t\t<e2audiotrack>
\t\t\t<e2audiotrackdescription>''')
_v = VFFSL(SL,"track.description",True) # u'$track.description' on line 6, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$track.description')) # from line 6, col 29.
write(u'''</e2audiotrackdescription>
\t\t\t<e2audiotrackid>''')
_v = VFFSL(SL,"track.index",True) # u'$track.index' on line 7, col 20
if _v is not None: write(_filter(_v, rawExpr=u'$track.index')) # from line 7, col 20.
write(u'''</e2audiotrackid>
\t\t\t<e2audiotrackpid>''')
_v = VFFSL(SL,"track.pid",True) # u'$track.pid' on line 8, col 21
if _v is not None: write(_filter(_v, rawExpr=u'$track.pid')) # from line 8, col 21.
write(u'''</e2audiotrackpid>
\t\t\t<e2audiotrackactive>''')
_v = VFFSL(SL,"track.active",True) # u'$track.active' on line 9, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$track.active')) # from line 9, col 24.
write(u'''</e2audiotrackactive>
\t\t</e2audiotrack>
''')
write(u'''</e2audiotracklist>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_70866031
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getaudiotracks= 'respond'
## END CLASS DEFINITION
if not hasattr(getaudiotracks, '_initCheetahAttributes'):
templateAPIClass = getattr(getaudiotracks, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getaudiotracks)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getaudiotracks()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/getaudiotracks.py
|
Python
|
gpl-2.0
| 5,989
|
[
"VisIt"
] |
7e1157953391976779d64c7d1740cf5825831cf8bcdd4ef6a9775f96e31a4c17
|
"""Test trie.py."""
import pytest
INSERT_STRINGS = ['hi', 'test', '', 'foo bar']
NONEMPTY_STRINGS = ['hello', 'hi', 'h', 'foobar', 'quux']
WORDS = [
"artsier",
"artsiest",
"artsy",
"artwork",
"artwork's",
"artworks",
"arty",
"as",
"asbestos",
"asbestos's",
"ascend",
"ascendancy",
"ascendancy's",
"ascendant",
"ascendant's",
"ascendants",
"ascended",
"ascendency",
"ascendency's",
"ascendent",
"ascendent's",
"ascendents",
"ascending",
"ascends",
"ascension",
"ascension's",
"ascensions",
"ascent",
"ascent's",
"ascents",
"ascertain",
"ascertainable",
"ascertained",
"ascertaining",
"ascertains",
"ascetic",
"ascetic's",
"asceticism"
]
def test_empty_trie():
"""Test empty trie contains nothing."""
from .trie import Trie
assert not Trie().contains('hi')
@pytest.mark.parametrize('s', INSERT_STRINGS)
def test_trie_insert(s):
"""Test trie contains inserted string."""
from .trie import Trie
t = Trie()
t.insert(s)
assert t.contains(s)
@pytest.mark.parametrize('s', NONEMPTY_STRINGS)
def test_trie_false_on_truncations(s):
"""Test trie contains inserted string."""
from .trie import Trie
t = Trie()
t.insert(s)
assert not t.contains(s[:-1])
def test_trie_many_words():
"""Test trie containing many words."""
from .trie import Trie
t = Trie()
for word in WORDS:
t.insert(word)
for word in WORDS:
assert t.contains(word)
def test_trie_insert_dollar():
"""Test trie will reject inserting a string containing $."""
from .trie import Trie
with pytest.raises(ValueError):
Trie().insert('$')
def test_trie_contains_dollar():
"""Test trie will reject looking up string with $."""
from .trie import Trie
t = Trie()
t.insert('a')
assert not t.contains('a$ uh oh!')
def comesbefore(t, a, b):
"""Used in testing traversal methods."""
return b in t[t.index(a):]
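# Example: comesbefore(['a', 'aa', 'b'], 'a', 'b') is True because
# 'b' appears at or after the index of 'a' in the traversal result.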
def test_traversal_empty():
"""Test traversal of an empty tree returns []."""
from .trie import Trie
assert list(Trie().traverse()) == []
def test_traversal_basic():
"""Test traversal of a tree with an empty word."""
from .trie import Trie
t = Trie()
t.insert('')
assert list(t.traverse()) == ['']
def test_traversal_word():
"""Test traversal of a tree with a single-char word."""
from .trie import Trie
t = Trie()
t.insert('a')
assert list(t.traverse()) == ['a']
def test_traversal_word_deep():
"""Test traversal of a tree with a multi-char word."""
from .trie import Trie
t = Trie()
t.insert('aa')
assert list(t.traverse()) == ['aa']
def test_traversal_word_deep_start():
"""Test traversal of a tree with a multi-char word and start word."""
from .trie import Trie
t = Trie()
t.insert('a')
t.insert('aa')
assert list(t.traverse()) == ['a', 'aa']
def test_traversal_word_deep_bad_start():
"""Test traversal of a tree with a multi-char word and bad start word."""
from .trie import Trie
t = Trie()
t.insert('aa')
with pytest.raises(KeyError):
list(t.traverse('b'))
def test_starting():
"""Test auto complete working correctly."""
from .trie import Trie
t = Trie()
t.insert('a')
t.insert('ab')
t.insert('ba')
results = ['a', 'ab']
for item in list(t.traverse('a')):
assert item in results
def test_traversal_word_deep_2():
"""Test traversal of a tree with a multi-char word."""
from .trie import Trie
t = Trie()
t.insert('aaaaa')
assert list(t.traverse()) == ['aaaaa']
def test_traversal_word_order():
"""Test traversal of a tree is depth-first."""
from .trie import Trie
t = Trie()
t.insert('a')
t.insert('aa')
t.insert('b')
t.insert('bb')
result = list(t.traverse())
# because which branch we visit first is random, we have to figure
# out which branch we traversed first to determine if the search
# was depth-first
if comesbefore(result, 'a', 'b'):
assert comesbefore(result, 'aa', 'b')
else:
assert comesbefore(result, 'bb', 'a')
|
welliam/data-structures
|
src/test_trie.py
|
Python
|
mit
| 4,260
|
[
"VisIt"
] |
a011d091c6b84cbe79dfb2e85ae435b3dd7259e4377845491802b8b6eb3c9abe
|
#!/usr/bin/python
import cv
import datetime
import numpy, scipy, scipy.fftpack
import pylab
IMG_STACK_LEN = 100
ANALYSIS_LAYER = 6
FFT_CHAN_MIN = 3
FFT_CHAN_MAX = 20
FREQ_THRESH = 0.05
inputfps = 25
outputfps = 30
window1 = "Current"
window2 = "Oldest"
window3 = "Time Data"
window4 = "Fourier Transform"
imgList = []
cv.NamedWindow(window2,cv.CV_WINDOW_NORMAL)
fig = pylab.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
fig.canvas.draw()
freqChart = None
timeChart = None
pylab.ion()
def preProcessImage(inImg):
"""
Returns an image, which is a processed version of the input image inImg.
Currently just converts to gray scale.
"""
outImg = cv.CreateImage(cv.GetSize(inImg),8,1)
cv.CvtColor(inImg,outImg,cv.CV_BGR2GRAY)
for i in range(ANALYSIS_LAYER):
outImg = doPyrDown(outImg)
return(outImg)
# End of preProcessImage
def doPlot(dataMat,fftMat):
global timeChart,freqChart,ax1,ax2,fig
pixelNo = 28
sampleFft = []
freqs = []
vals = []
times = []
freqBinWidth = 1.0*inputfps/IMG_STACK_LEN
for x in range(IMG_STACK_LEN):
freq = 1.0*x*freqBinWidth
freqs.append(freq)
sampleFft.append(fftMat[pixelNo,x])
times.append(x*1.0/inputfps)
vals.append(dataMat[pixelNo,x])
# Throw away the DC component to help with scaling the graph.
# sample_fft[0]=sample_fft[1]
if (timeChart==None):
#pylab.xlim(0,50)
timeChart, = ax1.plot(times,vals)
pylab.xlabel("time (sec)")
pylab.ylabel("brightness")
else:
timeChart.set_xdata(times)
timeChart.set_ydata(vals)
if (freqChart==None):
pylab.xlim(0,50)
freqChart, = ax2.plot(freqs,sampleFft)
pylab.xlabel("freq (Hz)")
pylab.ylabel("amplitude")
else:
freqChart.set_xdata(freqs)
freqChart.set_ydata(sampleFft)
fig.canvas.draw()
print "doPlot done"
def getSpectra(imgList):
""" Calculates the fourier transforms (against time) of all pixels in
imgList.
imgList is a list of tuples (datetime,image).
Creates a 2 dimensional array, where one dimension is the pixel values in
the image, and the other is time, then calculates the fourier transform.
To give the frequency contributions of the values in each pixel.
"""
(width,height) = cv.GetSize(imgList[0][1])
nPixels = width * height
#print "Image Size = (%d x %d) - %d pixels. Number of Images = %d" \
# % (width,height,nPixels,len(imgList))
# Create a matrix with pixel values in the y direction, and time (frame no)
# in the x direction. This means we can do an FFT on each row to get
# frequency components of each pixel.
dataMat = cv.CreateMat(nPixels,len(imgList),cv.CV_32FC1)
for frameNo in range(len(imgList)):
for y in range(height-1):
for x in range(width-1):
pixelNo = y*width+x
pixelVal = float(imgList[frameNo][1][y,x]/255.0)
dataMat[pixelNo,frameNo] = pixelVal
cv.ShowImage(window3,dataMat)
fftMat = cv.CreateMat(nPixels,len(imgList),cv.CV_32FC1)
#(a,fftMax,b,c)= cv.MinMaxLoc(fftMat)
#print "fftMax=%f" % (fftMax)
fftMat_int = cv.CreateMat(nPixels,len(imgList),cv.CV_8UC1)
cv.DFT(dataMat,fftMat,cv.CV_DXT_ROWS)
cv.ConvertScale(fftMat,fftMat_int,1000)
cv.ShowImage(window4,fftMat_int)
# Apply frequency filter to FFT data
for x in range(0,FFT_CHAN_MIN):
for y in range(0,nPixels):
fftMat[y,x] = 0.0
#for x in range(FFT_CHAN_MAX,len(imgList)-1):
# for y in range(0,nPixels):
# fftMat[y,x] = 0.0
doPlot(dataMat,fftMat)
return fftMat
def pixelNo2xy(pixelNo,img):
(width,height) = cv.GetSize(img)
y = int(pixelNo / (width-1))
x = pixelNo - y*(width-1)
return (x,y)
def getEquivLoc(x,y,layer):
""" Returns the equivalent location to x,y in a different layer.
"""
xl = int((x+1)*2**(layer))
yl = int((y+0.5)*2**(layer))
#print "getEquivLoc(%d,%d,%d) -> (%d,%d)" % (x,y,layer,xl,yl)
return (xl,yl)
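# Example: with layer=6 (the ANALYSIS_LAYER used above),
# getEquivLoc(3, 2, 6) returns (256, 160), i.e. the pixel in the
# full-resolution image that corresponds to (3, 2) in the
# downsampled layer.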
def doPyrDown(inImg):
"""
Returns an image that has been subjected to Gaussian downsampling via pyrDown.
Returned image is half the size of the original.
"""
(width,height)= cv.GetSize(inImg)
outSize = (width/2, height/2)
outImg = cv.CreateImage(outSize,8,1)
cv.PyrDown(inImg,outImg,cv.CV_GAUSSIAN_5x5)
return(outImg)
# end of doPyrDown
def main():
"""
Main program - controls grabbing images from video stream and loops around each frame.
"""
#camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
camera = cv.CaptureFromFile("testcards/sample1.mp4")
#camera = cv.CaptureFromCAM(0)
if (camera!=None):
frameSize = (640,480)
videoFormat = cv.FOURCC('p','i','m','1')
vw = cv.CreateVideoWriter("seizure_test.mpg",videoFormat, outputfps,frameSize,1)
cv.NamedWindow(window1,cv.CV_WINDOW_AUTOSIZE)
origImg = cv.QueryFrame(camera)
lastTime = datetime.datetime.now()
while (origImg):
# Preprocess, then add the new image to the list, along with the
# time it was recorded.
imgList.append(
(lastTime,
preProcessImage(origImg)
))
# Drop the oldest image off the list if we have enough in the list.
if (len(imgList)>IMG_STACK_LEN):
imgList.pop(0) # Remove first item
xorig = 0
yorig = 0
if (len(imgList) == IMG_STACK_LEN):
# imgList[] is now a list of tuples (time,image) containing the
# reduced size images -
spectra = getSpectra(imgList)
binWidth = 1.0*inputfps/IMG_STACK_LEN
#(a,fftMax,b,(freqNo,pixelNo))= cv.MinMaxLoc(spectra)
for freqNo in range(0,int(len(imgList)/2)):
for pixelNo in range(0,70):
if (abs(spectra[pixelNo,freqNo])>FREQ_THRESH):
print "PixelNo %d exceeds threshold (val=%f) in freq bin %d (%f Hz" % (pixelNo,abs(spectra[pixelNo,freqNo]),freqNo,freqNo*binWidth)
(xmax,ymax) = pixelNo2xy(pixelNo,imgList[0][1])
(xorig,yorig) = getEquivLoc(xmax,ymax,ANALYSIS_LAYER)
if (freqNo<10):
colour = cv.Scalar(255,1,1)
thickness = 1
elif (freqNo>10 and freqNo<20):
colour = cv.Scalar(1,255,1)
thickness = 5
elif (freqNo>20 and freqNo<30):
colour = cv.Scalar(1,1,255)
thickness = 10
elif (freqNo>30):
colour = cv.Scalar(255,255,255)
thickness = 20
cv.Circle(origImg, (xorig,yorig), 30, colour, thickness=thickness, lineType=-1, shift=0)
cv.WriteFrame(vw,origImg)
cv.ShowImage(window1,origImg)
cv.ShowImage(window2,imgList[0][1])
cv.WaitKey(1) # This is very important or ShowImage doesn't work!!
timeDiff = (datetime.datetime.now() - lastTime).total_seconds()
if (timeDiff<1./inputfps):
print "timediff=%f, 1/fps=%f" % (timeDiff,1./inputfps)
cv.WaitKey(1+int(1000.*(1./inputfps - timeDiff)))
# Note - there is something odd about this time calculation:
# it does not seem to be consistent with the timestamps on the
# images.
timeDiff = (datetime.datetime.now() - lastTime).total_seconds()
fps = 1./timeDiff
print "timeDiff=%f, fps=%f fps" % (timeDiff,fps)
# Now get a new frame ready to start the loop again
origImg = cv.QueryFrame(camera)
lastTime = datetime.datetime.now()
print "no more images..."
else:
print "Error - failed to connect to camera"
# End of main()
if __name__ == "__main__":
main()
|
OpenSeizureDetector/OpenSeizureDetector
|
video_version/Seizure_Detector.py
|
Python
|
gpl-3.0
| 8,334
|
[
"Gaussian"
] |
38db4bbc27d0faf5d6deb141c55abe0450c73f9477d2178e822d4f2a4036403b
|
#!/usr/bin/env python
# megamapper executer
# by Nikolaus Obholzer, Jan 2012
import sys, re, tempfile, subprocess
import os, shutil
from galaxy import eggs
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
def main():
# Handle input params
in_fname = sys.argv[1]
Mname = sys.argv[2]
chrom = sys.argv[3]
out_file1 = sys.argv[4]
out_file2 = sys.argv[5]
rscript_path = '/export/local_tools/MegaMapper/chrscan'
try:
#prepare command line
cmd = 'Rscript --vanilla %s %s %s %s %s %s' %(rscript_path,in_fname,out_file1,out_file2,Mname,chrom)
print cmd # for debugging
os.system(cmd)
finally:
sys.stdout.write( 'Megamapping complete.' )
# check that there are results in the output file
# if os.path.getsize( out_file1 ) >= 0:
# sys.stdout.write( 'Megamapping complete.' )
# else:
# stop_err( 'The output file is empty. Your input file may not have had SNPs, or there may be an error with your input file or settings.' )
if __name__ == "__main__":
main()
|
maxplanck-ie/Megamapper
|
chrscan.py
|
Python
|
bsd-3-clause
| 1,087
|
[
"Galaxy"
] |
2d02369b0eb11931bbf0561c25273d08ddfdce91acd05fb6f747f101537eb762
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import py2to3, PY3
if PY3:
unicode = str
@py2to3
class ItemList(object):
__slots__ = ['_item_class', '_common_attrs', '_items']
def __init__(self, item_class, common_attrs=None, items=None):
self._item_class = item_class
self._common_attrs = common_attrs
self._items = ()
if items:
self.extend(items)
def create(self, *args, **kwargs):
return self.append(self._item_class(*args, **kwargs))
def append(self, item):
self._check_type_and_set_attrs(item)
self._items += (item,)
return item
def _check_type_and_set_attrs(self, *items):
common_attrs = self._common_attrs or {}
for item in items:
if not isinstance(item, self._item_class):
raise TypeError("Only %s objects accepted, got %s."
% (self._item_class.__name__,
item.__class__.__name__))
for attr in common_attrs:
setattr(item, attr, common_attrs[attr])
return items
def extend(self, items):
self._items += self._check_type_and_set_attrs(*items)
def insert(self, index, item):
self._check_type_and_set_attrs(item)
items = list(self._items)
items.insert(index, item)
self._items = tuple(items)
def index(self, item, *start_and_end):
return self._items.index(item, *start_and_end)
def clear(self):
self._items = ()
def visit(self, visitor):
for item in self:
item.visit(visitor)
def __iter__(self):
return iter(self._items)
def __getitem__(self, index):
if not isinstance(index, slice):
return self._items[index]
items = self.__class__(self._item_class)
items._common_attrs = self._common_attrs
items.extend(self._items[index])
return items
def __setitem__(self, index, item):
if isinstance(index, slice):
self._check_type_and_set_attrs(*item)
else:
self._check_type_and_set_attrs(item)
items = list(self._items)
items[index] = item
self._items = tuple(items)
def __len__(self):
return len(self._items)
def __unicode__(self):
return u'[%s]' % ', '.join(unicode(item) for item in self)
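# Minimal usage sketch (hypothetical item class, not taken from the
# Robot Framework sources):
#
#   class Message(object):
#       def __init__(self, text):
#           self.text = text
#       def visit(self, visitor):
#           visitor.visit_message(self)
#
#   msgs = ItemList(Message)
#   msgs.create('hello')            # instantiates Message('hello')
#   msgs.append(Message('world'))   # type-checked append
#   len(msgs)                       # -> 2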
|
henriqueguchi/SikuliServer
|
new/Lib/robot/model/itemlist.py
|
Python
|
mit
| 2,982
|
[
"VisIt"
] |
a2418f40e6deb566e1c5e29350b5cfb2bb1eb8a001e4d8f78ad6defcae1db7d4
|
import arabic_script.elements as ase
def encoding_cleanup():
raise NotImplementedError
def tatweel_removal(text):
"""
The Tatweel (elongation) is used to stretch words to indicate
prominence or simply to force vertical justification.
This symbol has no effect on the meaning of the word so it's
usually normalized.
Examples:
- A word without Tatweel: جميل
- The same word with Tatweel: جـــمـــيـــل
:param text: The text that we need to extract the Tatweel from.
:return: A text without Tatweels.
"""
if text is None:
return None
return text.replace(ase.TATWEEL, '')
def diacritic_removal(text):
"""
Since diacritics occur so infrequently, they are considered noise
by most researchers and are simply removed from the text.
Examples:
- A word without diacritics: جميل
- The same word with diacritics: جَمِيلٌ
:param text: The text that we need to extract diacritics from.
:return: A text without diacritics.
"""
if text is None:
return None
for diacritic in ase.DIACRITICS:
text = text.replace(diacritic, '')
return text
def punctuation_removal(text):
"""
Remove all punctuation marks from text
:param text:
:return: A punctuation-free text
"""
if text is None:
return None
for mark in ase.PUNCTUATION_MARKS:
if mark in ase.NUMBERS_PUNCTUATION_MARKS:
continue
text = text.replace(mark, '')
return text
def letter_normalization(text, egyptian=False):
"""
There are four letters in Arabic that are so often misspelled using
variants that researchers find it more helpful to completely make
these variants ambiguous (normalized).
1. The Hamzated forms of Alif -> Alif.
2. The Alif-Maqsura -> Ya (Only in Egypt).
3. The Ta-Marbuta -> Ha.
4. The non-Alif forms of Hamza -> Hamza letter.
However, this may sometimes be problematic. Take the name 'Ana'
and the word meaning 'Me', for example: after normalization both
produce the same word, which is undesirable, especially in
named entity recognition.
Examples:
* Ana:
- Correct form: آنا
- After Normalization: انا
* Me:
- Correct form: أنا
- After Normalization: انا
:param text: The text we want to normalize its letters.
:param egyptian: To flag if we want to normalize the Alif-Maqsura.
:return: A letter-normalized string
"""
if text is None:
return None
if egyptian:
text = text.replace(ase.ALIF_MAQSURA, 'ي')
for form in ase.ALEF_HAMZA_FORMS:
text = text.replace(form, 'ا')
text = text.replace(ase.TA_MARBUTA, 'ه')
for form in ase.NON_ALIF_HAMZA_FORMS:
text = text.replace(form, ase.HAMZA)
return text
def clean_text(text):
"""
Cleans the word by removing punctuations, diacritics, non-letter
characters.
:param text: The word to clean
:return: A cleaned word that has nothing but letters.
"""
if text is None:
return None
# Remove whitespace characters from the beginning and the end
text = text.strip()
for letter in text:
if letter not in ase.LETTERS and letter != ' ':
text = text.replace(letter, '')
return text
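# Usage sketch (assumes diacritics and punctuation marks are not part
# of arabic_script.elements.LETTERS, so clean_text strips them):
#   clean_text('  جَمِيل! ')  # -> 'جميل'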
|
ahmedaljazzar/arabic-nlp
|
normalization/orthographic_normalization.py
|
Python
|
agpl-3.0
| 3,504
|
[
"ASE"
] |
470f98b1613a34c195f1295371f6228ab6de1954768fde0f53aacdb7969ec3d7
|
import unittest
from test import support
from itertools import *
from weakref import proxy
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
def tupleize(*args):
return args
def irange(n):
for i in range(n):
yield i
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
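# For example, take(3, count()) == [0, 1, 2].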
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
return x<10
class TestBasicOps(unittest.TestCase):
def pickletest(self, it, stop=4, take=1, compare=None):
"""Test that an iterator is the same after pickling, also when part-consumed"""
def expand(it, i=0):
# Recursively expand iterables, within sensible bounds
if i > 10:
raise RuntimeError("infinite recursion encountered")
if isinstance(it, str):
return it
try:
l = list(islice(it, stop))
except TypeError:
return it # can't expand it
return [expand(e, i+1) for e in l]
# Test the initial copy against the original
dump = pickle.dumps(it)
i2 = pickle.loads(dump)
self.assertEqual(type(it), type(i2))
a, b = expand(it), expand(i2)
self.assertEqual(a, b)
if compare:
c = expand(compare)
self.assertEqual(a, c)
# Take from the copy, and create another copy and compare them.
i3 = pickle.loads(dump)
took = 0
try:
for i in range(take):
next(i3)
took += 1
except StopIteration:
pass #in case there is less data than 'take'
dump = pickle.dumps(i3)
i4 = pickle.loads(dump)
a, b = expand(i3), expand(i4)
self.assertEqual(a, b)
if compare:
c = expand(compare[took:])
self.assertEqual(a, c);
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
self.assertEqual(list(accumulate(s, min)),
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
self.assertEqual(list(accumulate(s, max)),
[2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
self.assertEqual(list(accumulate(s, operator.mul)),
[2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
with self.assertRaises(TypeError):
list(accumulate(s, chr)) # unary-operation
self.pickletest(accumulate(range(10))) # test pickling
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_chain_reducible(self):
operators = [copy.deepcopy,
lambda s: pickle.loads(pickle.dumps(s))]
for oper in operators:
it = chain('abc', 'def')
self.assertEqual(list(oper(it)), list('abcdef'))
self.assertEqual(next(it), 'a')
self.assertEqual(list(oper(it)), list('bcdef'))
self.assertEqual(list(oper(chain(''))), [])
self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
self.assertRaises(TypeError, list, oper(chain(2, 3)))
self.pickletest(chain('abc', 'def'), compare=list('abcdef'))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))):
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version
self.pickletest(combinations(values, r)) # test pickling
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))):
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
self.pickletest(cwr(values,r)) # test pickling
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
self.pickletest(permutations(values, r)) # test pickling
@support.impl_detail("tuple resuse is CPython specific")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None) # too many args
# check copy, deepcopy, pickle
for op in (lambda a:copy.copy(a), lambda a:copy.deepcopy(a), lambda a:pickle.loads(pickle.dumps(a))):
for data, selectors, result1, result2 in [
('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
('ABCDEF', [0,0,0,0,0,0], '', ''),
('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
('ABCDEF', [1,0,1], 'AC', 'C'),
('ABC', [0,1,1,1,1,1], 'BC', 'C'),
]:
self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
self.assertEqual(list(op(compress(data, selectors))), list(result1))
testIntermediate = compress(data, selectors)
if result1:
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(list(islice(count(-maxsize-5), 10)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(list(islice(count(10, maxsize+5), 3)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(next(c), -8)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
self.pickletest(count(value))
        # check proper internal error handling for large "step" sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
        self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)')    # do show float values like 1.0
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
self.pickletest(count(i, j))
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
#simple copy currently not supported, because __reduce__ returns
#an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('cabcabcabc'))
self.pickletest(cycle('abc'))
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR))):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR))):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2))):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
# check copy, deepcopy, pickle
ans = [0,2,4]
c = filter(isEven, range(6))
self.assertEqual(list(copy.copy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(copy.deepcopy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans)
next(c)
self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans[1:])
c = filter(isEven, range(6))
self.pickletest(c)
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
self.pickletest(filterfalse(isEven, range(6)))
def test_zip(self):
# XXX This is rather silly now that builtin zip() calls zip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# check copy, deepcopy, pickle
ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count())))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
testIntermediate = zip('abc',count())
next(testIntermediate)
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate))]
self.assertEqual(ans, [('b', 1), ('c', 2)])
self.pickletest(zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
                self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_zip_longest_pickling(self):
self.pickletest(zip_longest("abc", "def"))
self.pickletest(zip_longest("abc", "defgh"))
self.pickletest(zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(zip_longest("", "defgh"))
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
        # Formerly this code would fail in debug mode
        # with an undetected error and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
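        # Two reference implementations for comparison: product1 advances a
        # list of indices like an odometer, product2 builds the cross product
        # incrementally, one pool at a time.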
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
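            # Odometer-style advance: bump the rightmost index that still has
            # room, reset every index to its right, and stop once all indices
            # are at their maximum.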
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_product_pickling(self):
# check copy, deepcopy, pickle
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(copy.copy(product(*args))), result)
self.assertEqual(list(copy.deepcopy(product(*args))), result)
self.pickletest(product(*args))
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
# check copy, deepcopy, pickle
c = repeat(object='a', times=10)
self.assertEqual(next(c), 'a')
self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
self.pickletest(repeat(object='a', times=10))
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
# check copy, deepcopy, pickle
ans = [('a',0),('b',1),('c',2)]
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.copy(c)), ans)
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.deepcopy(c)), ans)
c = map(tupleize, 'abc', count())
self.pickletest(c)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
# check copy, deepcopy, pickle
ans = [0**1, 1**2, 2**3]
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.copy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.deepcopy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.pickletest(c)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
self.assertRaises(ValueError, islice, ra, -5, 10, 1)
self.assertRaises(ValueError, islice, ra, 1, -5, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, 0)
self.assertRaises(ValueError, islice, ra, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1)
self.assertRaises(ValueError, islice, ra, 1, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323: Leave islice in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# check copy, deepcopy, pickle
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(copy.copy(islice(range(100), *args))),
list(range(*args)))
self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
list(range(*args)))
self.pickletest(islice(range(100), *args))
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
[1, 3, 5])
self.pickletest(takewhile(underten, data))
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
[20, 2, 4, 6, 8])
self.pickletest(dropwhile(underten, data))
def test_tee(self):
n = 200
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(range(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
long_ans = list(range(10000))
# check copy
a, b = tee('abc')
self.assertEqual(list(copy.copy(a)), ans)
self.assertEqual(list(copy.copy(b)), ans)
a, b = tee(list(range(10000)))
self.assertEqual(list(copy.copy(a)), long_ans)
self.assertEqual(list(copy.copy(b)), long_ans)
# check partially consumed copy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.copy(a)), ans[2:])
self.assertEqual(list(copy.copy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.copy(a)), long_ans[100:])
self.assertEqual(list(copy.copy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check deepcopy
a, b = tee('abc')
self.assertEqual(list(copy.deepcopy(a)), ans)
self.assertEqual(list(copy.deepcopy(b)), ans)
self.assertEqual(list(a), ans)
self.assertEqual(list(b), ans)
a, b = tee(range(10000))
self.assertEqual(list(copy.deepcopy(a)), long_ans)
self.assertEqual(list(copy.deepcopy(b)), long_ans)
self.assertEqual(list(a), long_ans)
self.assertEqual(list(b), long_ans)
# check partially consumed deepcopy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.deepcopy(a)), ans[2:])
self.assertEqual(list(copy.deepcopy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check pickle
self.pickletest(iter(tee('abc')))
a, b = tee('abc')
self.pickletest(a, compare=ans)
self.pickletest(b, compare=ans)
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
def test_accumulate_reducible(self):
# check copy, deepcopy, pickle
data = [1, 2, 3, 4, 5]
accumulated = [1, 3, 6, 10, 15]
it = accumulate(data)
self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[:])
self.assertEqual(next(it), 1)
self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[1:])
self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
self.assertEqual(list(copy.copy(it)), accumulated[1:])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
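        # Build a reference cycle between the iterator and a container it
        # consumes; dropping the local names leaves only the cycle, which
        # must be reclaimed by the garbage collector.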
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
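# Helper iterables used by TestVariousIteratorArgs below: R, G, I, Ig and L
# are well-behaved sequences/iterators, X and N break the iterator protocol,
# E raises ZeroDivisionError from __next__, and S stops immediately.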
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# doctest the examples in the library reference
support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_itertools.py
|
Python
|
mit
| 83,107
|
[
"GULP"
] |
44ecaa768a451446a28d682fa2acbd71dfc2a8d59ceccd8c9149f09604b31b81
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Create HTML reports."""
from __future__ import print_function, unicode_literals
from contextlib import contextmanager
from copy import deepcopy
import os.path as op
import time
import warnings
import numpy as np
import mne
from mne import read_proj, read_epochs
from mne.viz import plot_projs_topomap, plot_cov, plot_snr_estimate
from mne.viz._3d import plot_head_positions
from mne.report import Report
from mne.utils import _pl
from ._forward import _get_bem_src_trans
from ._paths import (get_raw_fnames, get_proj_fnames, get_report_fnames,
get_bad_fname, get_epochs_evokeds_fnames, safe_inserter)
from ._sss import (_load_trans_to, _head_pos_annot, _read_raw_prebad,
_get_t_window)
from ._viz import plot_good_coils, plot_chpi_snr_raw, trim_bg, mlab_offscreen
from ._utils import _fix_raw_eog_cals, _handle_dict
@contextmanager
def report_context():
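    """Context for building report figures.

    Temporarily switches matplotlib to the non-interactive Agg backend and
    applies a minimal axes style; the previous backend and interactive state
    are restored if an exception escapes the block.
    """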
import matplotlib
import matplotlib.pyplot as plt
style = {'axes.spines.right': 'off', 'axes.spines.top': 'off',
'axes.grid': True}
is_interactive = matplotlib.is_interactive()
plt.ioff()
old_backend = matplotlib.get_backend()
matplotlib.use('Agg', force=True)
try:
with plt.style.context(style):
yield
except Exception:
matplotlib.use(old_backend, force=True)
plt.interactive(is_interactive)
raise
def gen_html_report(p, subjects, structurals, run_indices=None):
"""Generate HTML reports."""
import matplotlib.pyplot as plt
if run_indices is None:
run_indices = [None] * len(subjects)
time_kwargs = dict()
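    # Older versions of mne.viz.plot_evoked do not accept time_unit, so only
    # pass it when the installed MNE exposes that argument.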
if 'time_unit' in mne.fixes._get_args(mne.viz.plot_evoked):
time_kwargs['time_unit'] = 's'
for si, subj in enumerate(subjects):
struc = structurals[si]
report = Report(verbose=False)
print(' Processing subject %s/%s (%s)'
% (si + 1, len(subjects), subj))
# raw
fnames = get_raw_fnames(p, subj, 'raw', erm=False, add_splits=False,
run_indices=run_indices[si])
for fname in fnames:
if not op.isfile(fname):
raise RuntimeError('Cannot create reports until raw data '
'exist, missing:\n%s' % fname)
raw = [_read_raw_prebad(p, subj, fname, False) for fname in fnames]
_fix_raw_eog_cals(raw, 'all')
raw = mne.concatenate_raws(raw)
# sss
sss_fnames = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
has_sss = all(op.isfile(fname) for fname in sss_fnames)
sss_info = mne.io.read_raw_fif(sss_fnames[0]) if has_sss else None
bad_file = get_bad_fname(p, subj)
if bad_file is not None:
sss_info.load_bad_channels(bad_file)
if sss_info is not None:
sss_info = sss_info.info
# pca
pca_fnames = get_raw_fnames(p, subj, 'pca', False, False,
run_indices[si])
has_pca = all(op.isfile(fname) for fname in pca_fnames)
# epochs
epochs_fname, _ = get_epochs_evokeds_fnames(p, subj, p.analyses)
_, epochs_fname = epochs_fname
has_epochs = op.isfile(epochs_fname)
# whitening and source localization
inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
has_fwd = op.isfile(op.join(p.work_dir, subj, p.forward_dir,
subj + p.inv_tag + '-fwd.fif'))
with report_context():
ljust = 25
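            # Each report section below follows the same pattern: check its
            # report_params flag, build the figures, and add them to the
            # Report with a timing printout aligned to `ljust` columns.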
#
# Head coils
#
section = 'Good HPI count'
if p.report_params.get('good_hpi_count', True) and p.movecomp:
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
figs = list()
captions = list()
for fname in fnames:
_, _, fit_data = _head_pos_annot(
p, subj, fname, prefix=' ')
if fit_data is None:
print('%s skipped, HPI count data not found (possibly '
'no params.*_limit values set?)' % (section,))
break
fig = plot_good_coils(fit_data, show=False)
fig.set_size_inches(10, 2)
fig.tight_layout()
figs.append(fig)
captions.append('%s: %s' % (section, op.split(fname)[-1]))
report.add_figs_to_section(figs, captions, section,
image_format='svg')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# cHPI SNR
#
section = 'cHPI SNR'
if p.report_params.get('chpi_snr', True) and p.movecomp:
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
figs = list()
captions = list()
for fname in fnames:
raw = mne.io.read_raw_fif(fname, allow_maxshield='yes')
t_window = _get_t_window(p, raw)
fig = plot_chpi_snr_raw(raw, t_window, show=False,
verbose=False)
fig.set_size_inches(10, 5)
fig.subplots_adjust(0.1, 0.1, 0.8, 0.95,
wspace=0, hspace=0.5)
figs.append(fig)
captions.append('%s: %s' % (section, op.split(fname)[-1]))
report.add_figs_to_section(figs, captions, section,
                                           image_format='png')  # svg too slow
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Head movement
#
section = 'Head movement'
if p.report_params.get('head_movement', True) and p.movecomp:
print((' %s ... ' % section).ljust(ljust), end='')
t0 = time.time()
trans_to = _load_trans_to(p, subj, run_indices[si], raw)
figs = list()
captions = list()
for fname in fnames:
pos, _, _ = _head_pos_annot(
p, subj, fname, prefix=' ')
fig = plot_head_positions(pos=pos, destination=trans_to,
info=raw.info, show=False)
for ax in fig.axes[::2]:
"""
# tighten to the sensor limits
assert ax.lines[0].get_color() == (0., 0., 0., 1.)
mn, mx = np.inf, -np.inf
for line in ax.lines:
ydata = line.get_ydata()
if np.isfinite(ydata).any():
mn = min(np.nanmin(ydata), mn)
mx = max(np.nanmax(line.get_ydata()), mx)
"""
# always show at least 10cm span, and use tight limits
# if greater than that
coord = ax.lines[0].get_ydata()
for line in ax.lines:
if line.get_color() == 'r':
extra = line.get_ydata()[0]
mn, mx = coord.min(), coord.max()
md = (mn + mx) / 2.
mn = min([mn, md - 50., extra])
mx = max([mx, md + 50., extra])
assert (mn <= coord).all()
assert (mx >= coord).all()
ax.set_ylim(mn, mx)
fig.set_size_inches(10, 6)
fig.tight_layout()
figs.append(fig)
captions.append('%s: %s' % (section, op.split(fname)[-1]))
del trans_to
report.add_figs_to_section(figs, captions, section,
image_format='svg')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Raw segments
#
if op.isfile(pca_fnames[0]):
raw_pca = [mne.io.read_raw_fif(fname) for fname in pca_fnames]
_fix_raw_eog_cals(raw_pca, 'all')
raw_pca = mne.concatenate_raws(raw_pca)
section = 'Raw segments'
if p.report_params.get('raw_segments', True) and has_pca:
times = np.linspace(raw.times[0], raw.times[-1], 12)[1:-1]
raw_plot = list()
for t in times:
this_raw = raw_pca.copy().crop(t - 0.5, t + 0.5)
this_raw.load_data()
this_raw._data[:] -= np.mean(this_raw._data, axis=-1,
keepdims=True)
raw_plot.append(this_raw)
raw_plot = mne.concatenate_raws(raw_plot)
for key in ('BAD boundary', 'EDGE boundary'):
raw_plot.annotations.delete(
np.where(raw_plot.annotations.description == key)[0])
new_events = np.linspace(
0, int(round(10 * raw.info['sfreq'])) - 1, 11).astype(int)
new_events += raw_plot.first_samp
new_events = np.array([new_events,
np.zeros_like(new_events),
np.ones_like(new_events)]).T
fig = raw_plot.plot(group_by='selection', butterfly=True,
events=new_events)
fig.axes[0].lines[-1].set_zorder(10) # events
fig.axes[0].set(xticks=np.arange(0, len(times)) + 0.5)
xticklabels = ['%0.1f' % t for t in times]
fig.axes[0].set(xticklabels=xticklabels)
fig.axes[0].set(xlabel='Center of 1-second segments')
fig.axes[0].grid(False)
for _ in range(len(fig.axes) - 1):
fig.delaxes(fig.axes[-1])
fig.set(figheight=(fig.axes[0].get_yticks() != 0).sum(),
figwidth=12)
fig.subplots_adjust(0.0, 0.0, 1, 1, 0, 0)
report.add_figs_to_section(fig, section + ' (processed)',
section, image_format='png')
#
# PSD
#
section = 'PSD'
if p.report_params.get('psd', True) and has_pca:
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
if p.lp_trans == 'auto':
lp_trans = 0.25 * p.lp_cut
else:
lp_trans = p.lp_trans
n_fft = 8192
fmax = raw.info['lowpass']
figs = [raw.plot_psd(fmax=fmax, n_fft=n_fft, show=False)]
captions = ['%s: Raw' % section]
fmax = p.lp_cut + 2 * lp_trans
figs.append(raw.plot_psd(fmax=fmax, n_fft=n_fft, show=False))
captions.append('%s: Raw (zoomed)' % section)
if op.isfile(pca_fnames[0]):
figs.append(raw_pca.plot_psd(fmax=fmax, n_fft=n_fft,
show=False))
captions.append('%s: Processed' % section)
# shared y limits
n = len(figs[0].axes) // 2
for ai, axes in enumerate(list(zip(
*[f.axes for f in figs]))[:n]):
ylims = np.array([ax.get_ylim() for ax in axes])
ylims = [np.min(ylims[:, 0]), np.max(ylims[:, 1])]
for ax in axes:
ax.set_ylim(ylims)
ax.set(title='')
for fig in figs:
fig.set_size_inches(8, 8)
with warnings.catch_warnings(record=True):
fig.tight_layout()
report.add_figs_to_section(figs, captions, section,
image_format='svg')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# SSP
#
section = 'SSP topomaps'
proj_nums = _handle_dict(p.proj_nums, subj)
if p.report_params.get('ssp_topomaps', True) and has_pca and \
np.sum(proj_nums) > 0:
assert sss_info is not None
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
figs = []
comments = []
proj_files = get_proj_fnames(p, subj)
if p.proj_extra is not None:
comments.append('Custom')
projs = read_proj(op.join(p.work_dir, subj, p.pca_dir,
p.proj_extra))
figs.append(plot_projs_topomap(projs, info=sss_info,
show=False))
if any(proj_nums[0]): # ECG
if 'preproc_ecg-proj.fif' in proj_files:
comments.append('ECG')
figs.append(_proj_fig(op.join(
p.work_dir, subj, p.pca_dir,
'preproc_ecg-proj.fif'), sss_info,
proj_nums[0], p.proj_meg, 'ECG'))
if any(proj_nums[1]): # EOG
if 'preproc_blink-proj.fif' in proj_files:
comments.append('Blink')
figs.append(_proj_fig(op.join(
p.work_dir, subj, p.pca_dir,
'preproc_blink-proj.fif'), sss_info,
proj_nums[1], p.proj_meg, 'EOG'))
if any(proj_nums[2]): # ERM
if 'preproc_cont-proj.fif' in proj_files:
comments.append('Continuous')
figs.append(_proj_fig(op.join(
p.work_dir, subj, p.pca_dir,
'preproc_cont-proj.fif'), sss_info,
proj_nums[2], p.proj_meg, 'ERM'))
captions = ['SSP epochs: %s' % c for c in comments]
report.add_figs_to_section(
figs, captions, section, image_format='svg',
comments=comments)
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Source alignment
#
section = 'Source alignment'
source_alignment = p.report_params.get('source_alignment', True)
            if ((source_alignment is True or
                 isinstance(source_alignment, dict)) and
                    has_sss and has_fwd):
assert sss_info is not None
kwargs = source_alignment
if isinstance(source_alignment, dict):
kwargs = dict(**source_alignment)
else:
assert source_alignment is True
kwargs = dict()
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
captions = [section]
try:
from mayavi import mlab
except ImportError:
warnings.warn('Cannot plot alignment in Report, mayavi '
'could not be imported')
else:
subjects_dir = mne.utils.get_subjects_dir(
p.subjects_dir, raise_error=True)
bem, src, trans, _ = _get_bem_src_trans(
p, sss_info, subj, struc)
if len(mne.pick_types(sss_info)):
coord_frame = 'meg'
else:
coord_frame = 'head'
with mlab_offscreen():
fig = mlab.figure(bgcolor=(0., 0., 0.),
size=(1000, 1000))
for key, val in (
('info', sss_info),
('subjects_dir', subjects_dir), ('bem', bem),
('dig', True), ('coord_frame', coord_frame),
('show_axes', True), ('fig', fig),
('trans', trans), ('src', src)):
kwargs[key] = kwargs.get(key, val)
try_surfs = [('head-dense', 'inner_skull'),
('head', 'inner_skull'),
'head',
'inner_skull']
for surf in try_surfs:
try:
mne.viz.plot_alignment(surfaces=surf, **kwargs)
except Exception:
pass
else:
break
else:
raise RuntimeError('Could not plot any surface '
'for alignment:\n%s'
% (try_surfs,))
fig.scene.parallel_projection = True
view = list()
for ai, angle in enumerate([180, 90, 0]):
mlab.view(angle, 90, focalpoint=(0., 0., 0.),
distance=0.6, figure=fig)
view.append(mlab.screenshot(figure=fig))
mlab.close(fig)
view = trim_bg(np.concatenate(view, axis=1), 0)
report.add_figs_to_section(view, captions, section)
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Drop log
#
section = 'Drop log'
if p.report_params.get('drop_log', True) and has_epochs:
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
epo = read_epochs(epochs_fname)
figs = [epo.plot_drop_log(subject=subj, show=False)]
captions = [repr(epo)]
report.add_figs_to_section(figs, captions, section,
image_format='svg')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# SNR
#
section = 'SNR'
if p.report_params.get('snr', None) is not None:
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
snrs = p.report_params['snr']
if not isinstance(snrs, (list, tuple)):
snrs = [snrs]
for snr in snrs:
assert isinstance(snr, dict)
analysis = snr['analysis']
name = snr['name']
times = snr.get('times', [0.1])
inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
fname_inv = op.join(inv_dir,
safe_inserter(snr['inv'], subj))
fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
% (analysis, p.lp_cut, p.inv_tag,
p.eq_tag, subj))
if not op.isfile(fname_inv):
print(' Missing inv: %s'
% op.basename(fname_inv), end='')
elif not op.isfile(fname_evoked):
print(' Missing evoked: %s'
% op.basename(fname_evoked), end='')
else:
inv = mne.minimum_norm.read_inverse_operator(fname_inv)
this_evoked = mne.read_evokeds(fname_evoked, name)
figs = plot_snr_estimate(
this_evoked, inv, verbose='error')
figs.axes[0].set_ylim(auto=True)
captions = ('%s: %s["%s"] (N=%d)'
% (section, analysis, name,
this_evoked.nave))
report.add_figs_to_section(
figs, captions, section=section,
image_format='svg')
print('%5.1f sec' % ((time.time() - t0),))
#
# BEM
#
section = 'BEM'
if p.report_params.get('bem', True) and has_fwd:
caption = '%s: %s' % (section, struc)
bem, src, trans, _ = _get_bem_src_trans(
p, raw.info, subj, struc)
if not bem['is_sphere']:
subjects_dir = mne.utils.get_subjects_dir(
p.subjects_dir, raise_error=True)
mri_fname = op.join(subjects_dir, struc, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
warnings.warn(
'Could not find MRI:\n%s\nIf using surrogate '
'subjects, use '
'params.report_params["bem"] = False to avoid '
'this warning', stacklevel=2)
else:
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
report.add_bem_to_section(struc, caption, section,
decim=10, n_jobs=1,
subjects_dir=subjects_dir)
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped (sphere)' % section)
else:
print(' %s skipped' % section)
#
# Whitening
#
section = 'Covariance'
if p.report_params.get('covariance', True):
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
cov_name = _get_cov_name(p, subj)
if cov_name is None:
                    print('    Missing covariance', end='')
else:
noise_cov = mne.read_cov(cov_name)
info = mne.io.read_info(pca_fnames[0])
figs = plot_cov(
noise_cov, info, show=False, verbose='error')
captions = ['%s: %s' % (section, kind)
for kind in ('images', 'SVDs')]
report.add_figs_to_section(
figs, captions, section=section, image_format='png')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
section = 'Whitening'
if p.report_params.get('whitening', False):
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
whitenings = p.report_params['whitening']
if not isinstance(whitenings, (list, tuple)):
whitenings = [whitenings]
for whitening in whitenings:
assert isinstance(whitening, dict)
analysis = whitening['analysis']
name = whitening['name']
cov_name = _get_cov_name(p, subj, whitening.get('cov'))
# Load the inverse
fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
% (analysis, p.lp_cut, p.inv_tag,
p.eq_tag, subj))
if cov_name is None:
if whitening.get('cov') is not None:
extra = ': %s' % op.basename(whitening['cov'])
else:
extra = ''
print(' Missing cov%s' % extra, end='')
elif not op.isfile(fname_evoked):
print(' Missing evoked: %s'
% op.basename(fname_evoked), end='')
else:
noise_cov = mne.read_cov(cov_name)
evo = mne.read_evokeds(fname_evoked, name)
captions = ('%s: %s["%s"] (N=%d)'
% (section, analysis, name, evo.nave))
fig = evo.plot_white(noise_cov, verbose='error',
**time_kwargs)
report.add_figs_to_section(
fig, captions, section=section, image_format='png')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Sensor space plots
#
section = 'Responses'
if p.report_params.get('sensor', False):
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
sensors = p.report_params['sensor']
if not isinstance(sensors, (list, tuple)):
sensors = [sensors]
for sensor in sensors:
assert isinstance(sensor, dict)
analysis = sensor['analysis']
name = sensor['name']
times = sensor.get('times', [0.1, 0.2])
fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
% (analysis, p.lp_cut, p.inv_tag,
p.eq_tag, subj))
if not op.isfile(fname_evoked):
print(' Missing evoked: %s'
% op.basename(fname_evoked), end='')
else:
this_evoked = mne.read_evokeds(fname_evoked, name)
figs = this_evoked.plot_joint(
times, show=False, ts_args=dict(**time_kwargs),
topomap_args=dict(outlines='head', **time_kwargs))
if not isinstance(figs, (list, tuple)):
figs = [figs]
captions = ('%s: %s["%s"] (N=%d)'
% (section, analysis, name,
this_evoked.nave))
captions = [captions] * len(figs)
report.add_figs_to_section(
figs, captions, section=section,
image_format='png')
print('%5.1f sec' % ((time.time() - t0),))
#
# Source estimation
#
section = 'Source estimation'
if p.report_params.get('source', False):
t0 = time.time()
print((' %s ... ' % section).ljust(ljust), end='')
sources = p.report_params['source']
if not isinstance(sources, (list, tuple)):
sources = [sources]
for source in sources:
assert isinstance(source, dict)
analysis = source['analysis']
name = source['name']
times = source.get('times', [0.1, 0.2])
# Load the inverse
inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
fname_inv = op.join(inv_dir,
safe_inserter(source['inv'], subj))
fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
% (analysis, p.lp_cut, p.inv_tag,
p.eq_tag, subj))
if not op.isfile(fname_inv):
print(' Missing inv: %s'
% op.basename(fname_inv), end='')
elif not op.isfile(fname_evoked):
print(' Missing evoked: %s'
% op.basename(fname_evoked), end='')
else:
inv = mne.minimum_norm.read_inverse_operator(fname_inv)
this_evoked = mne.read_evokeds(fname_evoked, name)
title = ('%s: %s["%s"] (N=%d)'
% (section, analysis, name, this_evoked.nave))
stc = mne.minimum_norm.apply_inverse(
this_evoked, inv,
lambda2=source.get('lambda2', 1. / 9.),
method=source.get('method', 'dSPM'))
stc = abs(stc)
# get clim using the reject_tmin <->reject_tmax
stc_crop = stc.copy().crop(
p.reject_tmin, p.reject_tmax)
clim = source.get('clim', dict(kind='percent',
lims=[82, 90, 98]))
out = mne.viz._3d._limits_to_control_points(
clim, stc_crop.data, 'viridis',
transparent=True) # dummy cmap
if isinstance(out[0], (list, tuple, np.ndarray)):
clim = out[0] # old MNE
else:
clim = out[1] # new MNE (0.17+)
clim = dict(kind='value', lims=clim)
assert isinstance(stc, (mne.SourceEstimate,
mne.VolSourceEstimate))
bem, _, _, _ = _get_bem_src_trans(
p, raw.info, subj, struc)
is_usable = (isinstance(stc, mne.SourceEstimate) or
not bem['is_sphere'])
if not is_usable:
print('Only source estimates with individual '
'anatomy supported')
break
subjects_dir = mne.utils.get_subjects_dir(
p.subjects_dir, raise_error=True)
kwargs = dict(
colormap=source.get('colormap', 'viridis'),
transparent=source.get('transparent', True),
clim=clim, subjects_dir=subjects_dir)
imgs = list()
size = source.get('size', (800, 600))
if isinstance(stc, mne.SourceEstimate):
with mlab_offscreen():
brain = stc.plot(
hemi=source.get('hemi', 'split'),
views=source.get('views', ['lat', 'med']),
size=size,
foreground='k', background='w',
**kwargs)
for t in times:
brain.set_time(t)
imgs.append(
trim_bg(brain.screenshot(), 255))
brain.close()
else:
                            # XXX eventually plot_volume_source_estimates
                            # will have an initial_time arg...
mode = source.get('mode', 'stat_map')
for t in times:
fig = stc.copy().crop(t, t).plot(
src=inv['src'], mode=mode, show=False,
**kwargs,
)
fig.set_dpi(100.)
fig.set_size_inches(*(np.array(size) / 100.))
imgs.append(fig)
captions = ['%2.3f sec' % t for t in times]
report.add_slider_to_section(
imgs, captions=captions, section=section,
title=title, image_format='png')
plt.close('all')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
report_fname = get_report_fnames(p, subj)[0]
report.save(report_fname, open_browser=False, overwrite=True)
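# Illustrative sketch only (not part of the original module): the sections in
# gen_html_report read their per-section configuration from
# ``p.report_params``.  Based on the keys accessed above, a configuration
# could look roughly like the dict returned here; the analysis names,
# condition name, and file-name templates are hypothetical placeholders.
def _example_report_params():
    return dict(
        good_hpi_count=True, chpi_snr=True, head_movement=True,
        raw_segments=True, psd=True, ssp_topomaps=True,
        source_alignment=True, drop_log=True, bem=True, covariance=True,
        whitening=dict(analysis='All', name='faces'),
        sensor=dict(analysis='All', name='faces', times=[0.1, 0.2]),
        snr=dict(analysis='All', name='faces',
                 inv='%s-55-sss-meg-inv.fif'),
        source=dict(analysis='All', name='faces',
                    inv='%s-55-sss-meg-inv.fif', times=[0.1, 0.2]),
    )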
def _proj_fig(fname, info, proj_nums, proj_meg, kind):
import matplotlib.pyplot as plt
proj_nums = np.array(proj_nums, int)
assert proj_nums.shape == (3,)
projs = read_proj(fname)
epochs = fname.replace('-proj.fif', '-epo.fif')
n_col = proj_nums.max()
rs_topo = 3
if op.isfile(epochs):
epochs = mne.read_epochs(epochs)
evoked = epochs.average()
rs_trace = 2
else:
rs_trace = 0
n_row = proj_nums.astype(bool).sum() * (rs_topo + rs_trace)
shape = (n_row, n_col)
fig = plt.figure(figsize=(n_col * 2, n_row * 0.75))
used = np.zeros(len(projs), int)
ri = 0
for count, ch_type in zip(proj_nums, ('grad', 'mag', 'eeg')):
if count == 0:
continue
if ch_type == 'eeg':
meg, eeg = False, True
else:
meg, eeg = ch_type, False
ch_names = [info['ch_names'][pick]
for pick in mne.pick_types(info, meg=meg, eeg=eeg)]
idx = np.where([np.in1d(ch_names, proj['data']['col_names']).all()
for proj in projs])[0]
if len(idx) != count:
raise RuntimeError('Expected %d %s projector%s for channel type '
'%s based on proj_nums but got %d in %s'
% (count, kind, _pl(count), ch_type, len(idx),
fname))
if proj_meg == 'separate':
assert not used[idx].any()
else:
assert (used[idx] <= 1).all()
used[idx] += 1
these_projs = [deepcopy(projs[ii]) for ii in idx]
for proj in these_projs:
sub_idx = [proj['data']['col_names'].index(name)
for name in ch_names]
proj['data']['data'] = proj['data']['data'][:, sub_idx]
proj['data']['col_names'] = ch_names
topo_axes = [plt.subplot2grid(
shape, (ri * (rs_topo + rs_trace), ci),
rowspan=rs_topo) for ci in range(count)]
# topomaps
with warnings.catch_warnings(record=True):
plot_projs_topomap(these_projs, info=info, show=False,
axes=topo_axes)
plt.setp(topo_axes, title='', xlabel='')
topo_axes[0].set(ylabel=ch_type)
if rs_trace:
trace_axes = [plt.subplot2grid(
shape, (ri * (rs_topo + rs_trace) + rs_topo, ci),
rowspan=rs_trace) for ci in range(count)]
for proj, ax in zip(these_projs, trace_axes):
this_evoked = evoked.copy().pick_channels(ch_names)
p = proj['data']['data']
assert p.shape == (1, len(this_evoked.data))
with warnings.catch_warnings(record=True): # tight_layout
this_evoked.plot(
picks=np.arange(len(this_evoked.data)), axes=[ax])
ax.texts = []
trace = np.dot(p, this_evoked.data)[0]
trace *= 0.8 * (np.abs(ax.get_ylim()).max() /
np.abs(trace).max())
ax.plot(this_evoked.times, trace, color='#9467bd')
ax.set(title='', ylabel='', xlabel='')
ri += 1
assert used.all() and (used <= 2).all()
fig.subplots_adjust(0.1, 0.1, 0.95, 1, 0.3, 0.3)
return fig
def _get_cov_name(p, subj, cov_name=None):
# just the first for now
if cov_name is None:
if p.inv_names:
cov_name = (safe_inserter(p.inv_names[0], subj) +
('-%d' % p.lp_cut) + p.inv_tag + '-cov.fif')
elif p.runs_empty: # erm cov
new_run = safe_inserter(p.runs_empty[0], subj)
cov_name = new_run + p.pca_extra + p.inv_tag + '-cov.fif'
if cov_name is not None:
cov_dir = op.join(p.work_dir, subj, p.cov_dir)
cov_name = op.join(cov_dir, cov_name)
if not op.isfile(cov_name):
cov_name = None
return cov_name
|
kambysese/mnefun
|
mnefun/_report.py
|
Python
|
bsd-3-clause
| 37,750
|
[
"Mayavi"
] |
e3b6fff17edb174c4b177a10a774040d62f339c07f02e1700325cf21d7b715b4
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Extra form fields and widgets.
import logging
import simplejson
import urllib
from django.forms import Widget, Field
from django import forms
from django.forms.util import ErrorList, ValidationError, flatatt
from django.forms.fields import MultiValueField, CharField, ChoiceField, BooleanField
from django.forms.widgets import MultiWidget, Select, TextInput, Textarea, HiddenInput, Input
from django.utils import formats
from django.utils.safestring import mark_safe
from django.utils.encoding import StrAndUnicode, force_unicode
import desktop.lib.i18n
from desktop.lib.i18n import smart_str
LOG = logging.getLogger(__name__)
class SplitDateTimeWidget(forms.MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
The date_class and time_class attributes specify class names to be given
specifically to the corresponding DateInput and TimeInput widgets.
"""
date_format = formats.get_format('DATE_INPUT_FORMATS')[0]
time_format = formats.get_format('TIME_INPUT_FORMATS')[0]
def __init__(self, attrs=None, date_format=None, time_format=None, date_class='date', time_class='time'):
        attrs = dict(attrs) if attrs is not None else {}
        date_attrs = dict(attrs)
        time_attrs = dict(attrs)
if 'class' in date_attrs:
date_classes = [clss for clss in date_attrs['class'].split() if clss != date_class]
date_classes.append(date_class)
date_attrs['class'] = ' '.join(date_classes)
else:
date_attrs['class'] = date_class
if 'class' in time_attrs:
time_classes = [clss for clss in time_attrs['class'].split() if clss != time_class]
time_classes.append(time_class)
time_attrs['class'] = ' '.join(time_classes)
else:
time_attrs['class'] = time_class
widgets = (forms.DateInput(attrs=date_attrs, format=date_format),
forms.TimeInput(attrs=time_attrs, format=time_format))
        attrs.pop('class', None)
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
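# Illustrative sketch only (not part of the original module): decompress()
# splits a datetime into the [date, time] pair fed to the two sub-widgets,
# dropping microseconds from the time component.  The CSS class names below
# are the documented defaults.
def _example_split_datetime_widget():
    import datetime
    widget = SplitDateTimeWidget(attrs={'class': 'compact'},
                                 date_class='date', time_class='time')
    value = datetime.datetime(2012, 3, 4, 15, 30, 45, 123456)
    return widget.decompress(value)  # [date(2012, 3, 4), time(15, 30, 45)]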
class MultipleInputWidget(Widget):
"""
Together with MultipleInputField, represents repeating a form element many times,
and representing a list of values for that element.
This could be made generic to work with any widget, but currently
renders itself as a regular old <input>.
"""
def __init__(self, attrs=None):
super(MultipleInputWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ()
if attrs is None or "count" not in attrs:
count = 5
else:
count = attrs["count"]
count = max(len(value) + 1, count)
s = ""
for i in range(count):
if value is not None and i < len(value):
v = value[i]
s += '<input name="%s" value="%s"></input>' % (name, v)
else:
s += '<input name="%s"></input>' % name
return s
def value_from_datadict(self, data, files, name):
        # Sometimes this is a QueryDict, and sometimes a regular dict,
        # so we adapt:
        if hasattr(data, 'getlist'):
            values = data.getlist(name)
        else:
            values = data.get(name, [])
        non_empty = lambda x: len(x) != 0
        return filter(non_empty, values)
class MultipleInputField(Field):
widget = MultipleInputWidget
def __init__(self, *args, **kwargs):
super(MultipleInputField, self).__init__(*args, **kwargs)
def clean(self, value):
return value
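# Illustrative sketch only (not part of the original module): the widget
# renders one <input> per existing value plus a trailing empty <input>, and
# value_from_datadict() collapses the repeated inputs back into a list while
# dropping empty entries.  The field name used here is hypothetical.
def _example_multiple_input_widget():
    widget = MultipleInputWidget()
    # Renders three <input name="hosts"> elements: two populated, one empty.
    return widget.render('hosts', ['alpha', 'beta'], attrs={'count': 3})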
OTHER_VAL, OTHER_PRES = "__other__", "Other..."
class ChoiceOrOtherWidget(MultiWidget):
"""
Together with ChoiceOrOtherField represents a drop-down and an "other"
text-box.
This may not map well onto an AJAX model, since in that world
the JS presentation will handle sending only one value.
"""
def __init__(self, attrs=None, choices=()):
self.choices = choices
    self.values = [ val for val, pres in choices if val != OTHER_VAL ]
widgets = (
Select(attrs=attrs, choices=choices),
TextInput(attrs=attrs)
)
super(ChoiceOrOtherWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value in self.values:
return [value, ""]
else:
return [OTHER_VAL, value]
class ChoiceOrOtherField(MultiValueField):
def __init__(self, choices, initial=None, *args, **kwargs):
assert not kwargs.get('required', False), "required=True is not supported"
allchoices = [ x for x in choices ] # Force choices into a list.
allchoices.append( (OTHER_VAL, OTHER_PRES) )
self.widget = ChoiceOrOtherWidget(choices=allchoices)
choice_initial, other_initial = None, None
if initial is not None:
# Match initial against one of the values
if initial in [ x for x, y in choices ]:
choice_initial = initial
else:
choice_initial = OTHER_VAL
other_initial = initial
fields = [
ChoiceField(required=False, choices=allchoices),
CharField(required=False)
]
# Be careful not to make the initial value a tuple;
# it's checked explicitly to be a list in MultiWidget's
# render.
super(ChoiceOrOtherField, self).__init__(fields, initial=[choice_initial, other_initial], *args, **kwargs)
def compress(self, data_list):
if len(data_list) == 0:
return None
if data_list[0] == OTHER_VAL:
return data_list[1]
else:
if data_list[1]:
raise ValidationError("Either select from the drop-down or select %s" % OTHER_PRES)
return data_list[0]
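# Illustrative sketch only (not part of the original module): how
# ChoiceOrOtherField.compress() resolves the (select, text) pair coming back
# from ChoiceOrOtherWidget.  The choice values are hypothetical.
def _example_choice_or_other():
    field = ChoiceOrOtherField(choices=[('red', 'Red'), ('blue', 'Blue')])
    picked = field.compress(['red', ''])         # value from the drop-down
    typed = field.compress([OTHER_VAL, 'teal'])  # value typed under "Other..."
    return picked, typed                         # ('red', 'teal')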
class KeyValueWidget(Textarea):
def render(self, name, value, attrs=None):
# If we have a dictionary, render back into a string.
if isinstance(value, dict):
value = " ".join("=".join([k, v]) for k, v in value.iteritems())
return super(KeyValueWidget, self).render(name, value, attrs)
class KeyValueField(CharField):
"""
Represents an input area for key/value pairs in the following format:
"<key1>=<val1> <key2>=<value2>...."
clean() returns a dictionary of parsed key/value pairs.
"""
widget = KeyValueWidget
def __init__(self, *args, **kwargs):
super(KeyValueField, self).__init__(*args, **kwargs)
def clean(self, value):
"""Converts the raw key=val text to a dictionary of key/val pairs"""
super(KeyValueField, self).clean(value)
try:
return dict(kvpair.split('=', 2) for kvpair in value.split())
except Exception:
raise ValidationError("Not in key=value format.")
class UnicodeEncodingField(ChoiceOrOtherField):
"""
The cleaned value of the field is the actual encoding, not a tuple
"""
CHOICES = [
('utf-8', 'Unicode UTF8'),
('utf-16', 'Unicode UTF16'),
('latin_1', 'Western ISO-8859-1'),
('latin_9', 'Western ISO-8859-15'),
    ('cyrillic', 'Cyrillic'),
('arabic', 'Arabic'),
('greek', 'Greek'),
('hebrew', 'Hebrew'),
('shift_jis', 'Japanese (Shift-JIS)'),
('euc-jp', 'Japanese (EUC-JP)'),
('iso2022_jp', 'Japanese (ISO-2022-JP)'),
('euc-kr', 'Korean (EUC-KR)'),
('iso2022-kr', 'Korean (ISO-2022-KR)'),
('gbk', 'Chinese Simplified (GBK)'),
('big5hkscs', 'Chinese Traditional (Big5-HKSCS)'),
('ascii', 'ASCII'),
]
def __init__(self, initial=None, *args, **kwargs):
ChoiceOrOtherField.__init__(self, UnicodeEncodingField.CHOICES, initial, *args, **kwargs)
def clean(self, value):
encoding = value[0] == OTHER_VAL and value[1] or value[0]
if encoding and not desktop.lib.i18n.validate_encoding(encoding):
raise forms.ValidationError("'%s' encoding is not available" % (encoding,))
return encoding
class MultiForm(object):
"""
Initialize this with the necessary sub-forms, and then
call bind(request).
TODO(philip): Should users use this by extending
it? Or is this really a forms.Field subclass.
"""
def __init__(self, prefix='', **kwargs):
"""
prefix is prepended to the prefix of the member forms
Keyword arguments are:
key=form_class, key2=form_class2, ...
The form_class can be a Form, a Formset, or a MultiForm.
It is currently not possible to specify ctor arguments to the form_class.
"""
self._form_types = kwargs
self._is_bound = False
self._prefix = prefix
def __str__(self):
    return 'MultiForm at %s' % (self._prefix)
def add_prefix(self, name):
"""Returns the subform name with a prefix prepended, if the prefix is set"""
return self._prefix and ('%s.%s' % (self._prefix, name)) or name
def get_subforms(self):
"""get_subforms() -> An iterator over (name, subform)"""
assert self._is_bound
return self._forms.iteritems()
def has_subform_data(self, subform_name, data):
"""Test if data contains any information bound for the subform"""
prefix = self.add_prefix(subform_name)
    return len([ k for k in data.keys() if k.startswith(prefix) ]) != 0
def add_subform(self, name, form_cls, data=None):
"""Dynamically extend this MultiForm to include a new subform"""
self._form_types[name] = form_cls
self._bind_one(name, form_cls, data)
def remove_subform(self, name):
"""Dynamically remove a subform. Raises KeyError."""
del self._form_types[name]
if self._forms.has_key(name):
del self._forms[name]
def bind(self, data=None, instances=None):
self._is_bound = True
self._forms = {}
for key, form_cls in self._form_types.iteritems():
instance = instances is not None and instances.get(key) or None
self._bind_one(key, form_cls, data, instance=instance)
def _bind_one(self, key, form_cls, data=None, instance=None):
prefix = self.add_prefix(key)
if issubclass(form_cls, MultiForm):
member = form_cls(prefix=prefix)
member.bind(data=data)
elif instance is not None:
member = form_cls(data=data, prefix=prefix, instance=instance)
else:
member = form_cls(data=data, prefix=prefix)
self._forms[key] = member
def __getattr__(self, key):
assert self._is_bound
return self._forms.get(key)
def is_valid(self):
assert self._is_bound
r = True
# Explicitly iterate through all of them; we don't want
# to abort early, since we want each form's is_valid to be run.
for f in self._forms.values():
if not f.is_valid():
LOG.error(smart_str(f.errors))
r = False
return r
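# Illustrative sketch only (not part of the original module): composing two
# plain Django forms into a MultiForm and binding request data against it.
# The member form classes and field names are hypothetical.
def _example_multi_form(post_data=None):
    class JobForm(forms.Form):
        name = forms.CharField()
    class ScheduleForm(forms.Form):
        cron = forms.CharField(required=False)
    multi = MultiForm(prefix='wizard', job=JobForm, schedule=ScheduleForm)
    # bind() instantiates each member with a "wizard.<key>" prefix; pass the
    # request's POST data, or None for an unbound form.
    multi.bind(data=post_data)
    # Once bound, members are reachable as attributes, e.g. multi.job.
    return multi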
class SubmitButton(Input):
"""
A widget that presents itself as a submit button.
"""
input_type = "submit"
def render(self, name, value, attrs=None):
if value is None:
value = 'True'
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name, value=value)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(value)
return mark_safe(u'<button%s>%s</button>' % (flatatt(final_attrs), getattr(self, "label", "Submit")))
class ManagementForm(forms.Form):
add = BooleanField(widget=SubmitButton,required=False)
next_form_id = forms.IntegerField(widget=forms.HiddenInput, initial=0)
def __init__(self, add_label='+', *args, **kwargs):
super(ManagementForm, self).__init__(*args, **kwargs)
self.fields["add"].label = add_label
self.fields["add"].widget.label = add_label
def new_form_id(self):
"""
new_form_id() -> The id for the next member of the formset. Increment hidden value.
The ManagementForm needs to keep track of a monotonically increasing id, so that
new member forms don't reuse ids of deleted forms.
"""
# Hack. self.data is supposed to be immutable.
res = self.form_counts()
data2 = self.data.copy()
data2[self.add_prefix('next_form_id')] = str(res + 1)
self.data = data2
return res
def form_counts(self):
"""form_counts() -> The max number of forms, some could be non-existent (deleted)."""
try:
return int(self.data[ self.add_prefix('next_form_id') ])
except KeyError:
return self.fields['next_form_id'].initial
class BaseSimpleFormSet(StrAndUnicode):
"""
Manages multiple instances of the same form, and easily modifies how many of said
form there are.
This is similar to django.forms.formsets.BaseFormSet,
but is hopefully simpler.
    We take a base form (that's passed in via the simple_formset_factory
    machinery), and initialize it with prefix="prefix-N", for integer
    values of N. The management form's "add" button requests an extra
    empty member, and "prefix-N-_deleted" marks a member form for deletion.
"""
def __init__(self, data=None, prefix=None, initial=None):
self.is_bound = data is not None
assert prefix, "Prefix is required."
self.prefix = prefix
# The initial is sometimes set before the ctor, especially when used in a MultiForm,
# which doesn't allow passing custom ctor arguments.
self.initial = initial or getattr(self, 'initial', initial)
self.data = data
self._non_form_errors = None
self._errors = None
self._construct_forms()
def make_prefix(self, i):
return "%s-%s" % (self.prefix, i)
def _construct_mgmt_form(self):
if self.data:
form = ManagementForm(data=self.data, prefix=self.prefix, add_label=self.add_label)
if not form.is_valid():
raise forms.ValidationError('Management form missing for %s' % (self.prefix))
else:
# A new unbound formset
n_initial = self.initial and len(self.initial) or 0
form = ManagementForm(prefix=self.prefix,
add_label=self.add_label,
initial={ 'next_form_id': n_initial })
self.management_form = form
def empty_form(self):
f = self.form(prefix=self.make_prefix("TEMPLATE"))
f.fields["_exists"] = BooleanField(initial=True, widget=HiddenInput)
f.fields["_deleted"] = BooleanField(initial=True, required=False, widget=SubmitButton)
return f
def _construct_forms(self):
self._construct_mgmt_form()
self.forms = []
if not self.is_bound:
if self.initial is not None:
for i, data in enumerate(self.initial):
self.forms.append(self.form(initial=data, prefix=self.make_prefix(i)))
else:
self.forms = []
else:
for i in range(0, self.management_form.form_counts()):
# Since the form might be "not valid", you can't use
# cleaned_data to get at these fields.
if self.make_prefix(i) + "-_exists" in self.data:
if self.data.get(self.make_prefix(i) + "-_deleted") != "True":
f = self.form(data=self.data, prefix=self.make_prefix(i))
self.forms.append(f)
if self.management_form.is_valid() and self.management_form.cleaned_data["add"]:
self.add_form()
for f in self.forms:
f.fields["_exists"] = BooleanField(initial=True, widget=HiddenInput)
# Though _deleted is marked as initial=True, the value is only transmitted
# if this is the button that's clicked, so the real default is False.
f.fields["_deleted"] = BooleanField(initial=True, required=False, widget=SubmitButton)
f.fields["_deleted"].widget.label = "(x)"
def add_form(self):
"""Programatically add a form"""
prefix = self.make_prefix(self.management_form.new_form_id())
member = self.form(prefix=prefix)
self.forms.append(member)
def clean(self):
"""Hook for custom cleaning."""
pass
def full_clean(self):
"""Simlar to formsets.py:full_clean"""
self._errors = []
if not self.is_bound:
return
for f in self.forms:
self._errors.append(f.errors)
try:
self.clean()
except ValidationError, e:
self._non_form_errors = e.messages
@property
def errors(self):
if self._errors is None:
self.full_clean()
return self._errors
def non_form_errors(self):
if self._non_form_errors is not None:
return self._non_form_errors
return ErrorList()
def is_valid(self):
if not self.is_bound:
return False
valid = True
# Iterate through all, to find all errors, not just first ones.
for i, f in enumerate(self.forms):
if bool(self.errors[i]) or not f.is_valid():
valid = False
return valid and not bool(self.non_form_errors())
def simple_formset_factory(form, add_label="+", formset=BaseSimpleFormSet, initial=None):
"""Return a FormSet for the given form class."""
attrs = {
'form': form,
'add_label': add_label,
'initial': initial
}
return type(form.__name__ + 'SimpleFormSet', (formset,), attrs)
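# Illustrative sketch only (not part of the original module): building a
# repeating form with simple_formset_factory.  The member form, its field,
# and the prefix are hypothetical.
def _example_simple_formset(post_data=None):
    class HostForm(forms.Form):
        hostname = forms.CharField()
    HostFormSet = simple_formset_factory(HostForm, add_label='Add host',
                                         initial=[{'hostname': 'localhost'}])
    # A prefix is required; member forms are prefixed "hosts-0", "hosts-1", ...
    formset = HostFormSet(data=post_data, prefix='hosts')
    return formset.forms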
class DependencyAwareForm(forms.Form):
"""
Inherit from this class and add
(condition name, condition value, child name) tuples
to self.dependencies to describe dependencies between
certain form fields.
The semantic meaning is that the field named "child name"
is required if and only if the field "condition name"
has value "condition value".
For an example, visit the jframegallery ("fields with dependencies").
"""
def clean(self):
ret = super(DependencyAwareForm, self).clean()
if self.errors:
return
for cond, required_value, child in self.dependencies:
if self.cleaned_data.get(cond) == required_value:
child_val = self.cleaned_data.get(child)
if child_val in [None, '']:
self._errors.setdefault(child, []).append("%s is required if %s is %s" % (child, cond, str(required_value)))
return ret
def _calculate_data(self):
"""
Returns a "dict" with mappings between ids, desired values, and ids.
"""
def data(cond, required_value, child):
"""Calculates data for single item."""
return self.add_prefix(cond), str(required_value), self.add_prefix(child)
return [ data(*x) for x in self.dependencies ]
def render_dep_metadata(self):
return urllib.quote_plus(simplejson.dumps(self._calculate_data(), separators=(',', ':')))
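# Illustrative sketch only (not part of the original module): declaring a
# conditional requirement with DependencyAwareForm.  The field names are
# hypothetical; "port" becomes required only when "enable_ssl" is True.
class _ExampleSslForm(DependencyAwareForm):
    enable_ssl = forms.BooleanField(required=False)
    port = forms.CharField(required=False)
    dependencies = [
        ('enable_ssl', True, 'port'),
    ]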
|
pwong-mapr/private-hue
|
desktop/core/src/desktop/lib/django_forms.py
|
Python
|
apache-2.0
| 18,460
|
[
"VisIt"
] |
6b4d05f0b028914680830508cbfee79818860b752d64a637800eaf906198a3dc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import itertools
import json
import os
import unittest
import numpy as np
from monty.json import MontyDecoder
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.defects.core import Interstitial, Substitution, Vacancy
from pymatgen.analysis.structure_matcher import (
ElementComparator,
FrameworkComparator,
OccupancyComparator,
OrderDisorderElementComparator,
PointDefectComparator,
StructureMatcher,
)
from pymatgen.core import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list_pbc
from pymatgen.util.testing import PymatgenTest
class StructureMatcherTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self.struct_list = [e.structure for e in entries]
self.oxi_structs = [
self.get_structure("Li2O"),
Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.Li2O")),
]
def test_ignore_species(self):
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
m = StructureMatcher(ignored_species=["Li"], primitive_cell=False, attempt_supercell=True)
self.assertTrue(m.fit(s1, s2))
self.assertTrue(m.fit_anonymous(s1, s2))
groups = m.group_structures([s1, s2])
self.assertEqual(len(groups), 1)
s2.make_supercell((2, 1, 1))
ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)
self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)
self.assertEqual(ss1.composition.reduced_formula, "LiFePO4")
self.assertEqual(
{k.symbol: v.symbol for k, v in m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},
{"Fe": "Fe", "P": "P", "O": "O"},
)
def test_get_supercell_size(self):
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.9)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu", "Ag"], [[0] * 3] * 5)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
sm = StructureMatcher(supercell_size="volume")
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="num_sites")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size="Ag")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size=["Ag", "Cu"])
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="wfieoh")
self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)
def test_cmp_fstruct(self):
sm = StructureMatcher()
s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
s2 = np.array([[0.11, 0.22, 0.33]])
frac_tol = np.array([0.02, 0.03, 0.04])
mask = np.array([[False, False]])
mask2 = np.array([[True, False]])
self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)
self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)
self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol / 2, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))
def test_cart_dists(self):
sm = StructureMatcher()
l = Lattice.orthorhombic(1, 2, 3)
s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])
s2 = np.array([[0.11, 0.22, 0.33]])
s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])
s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])
mask = np.array([[False, False]])
mask2 = np.array([[False, True]])
mask3 = np.array([[False, False], [False, False]])
mask4 = np.array([[False, True], [False, True]])
n1 = (len(s1) / l.volume) ** (1 / 3)
n2 = (len(s2) / l.volume) ** (1 / 3)
self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)
self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)
d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))
self.assertTrue(np.allclose(s, [1]))
# check that masking best value works
d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))
self.assertTrue(np.allclose(s, [0]))
# check that averaging of translation is done properly
d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)
self.assertTrue(np.allclose(d, [0.08093341] * 2))
self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))
self.assertTrue(np.allclose(s, [1, 0]))
# check distances are large when mask allows no 'real' mapping
d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)
self.assertTrue(np.min(d) > 1e8)
self.assertTrue(np.min(ft) > 1e8)
def test_get_mask(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
result = [
[True, False, True, False],
[True, False, True, False],
[True, True, False, True],
]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertEqual(inds, [2])
# test supercell with match
result = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s1, s2, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertTrue(np.allclose(inds, np.array([4])))
# test supercell without match
result = [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test s2_supercell
result = [
[1, 1, 1],
[1, 1, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 0],
[0, 0, 1],
[0, 0, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, False)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test for multiple translation indices
s1 = Structure(l, ["Cu", "Ag", "Cu", "Ag", "Ag"], [[0] * 3] * 5)
s2 = Structure(l, ["Ag", "Cu", "Ag"], [[0] * 3] * 3)
result = [[1, 0, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 1, 0, 0]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 1)
self.assertTrue(np.allclose(inds, [0, 2]))
def test_get_supercells(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.5)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
scs = list(sm._get_supercells(s1, s2, 8, False))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 4)
self.assertEqual(len(x[1]), 24)
self.assertEqual(len(scs), 48)
scs = list(sm._get_supercells(s2, s1, 8, True))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 24)
self.assertEqual(len(x[1]), 4)
self.assertEqual(len(scs), 48)
def test_fit(self):
"""
Take two known matched structures
1) Ensure match
2) Ensure match after translation and rotations
3) Ensure no-match after large site translation
4) Ensure match after site shuffling
"""
sm = StructureMatcher()
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test rotational/translational invariance
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, np.array([0.4, 0.7, 0.9]))
self.struct_list[1].apply_operation(op)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test failure under large atomic translation
self.struct_list[1].translate_sites([0], [0.4, 0.4, 0.2], frac_coords=True)
self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))
self.struct_list[1].translate_sites([0], [-0.4, -0.4, -0.2], frac_coords=True)
# random.shuffle(editor._sites)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
        # Test FrameworkComparator
sm2 = StructureMatcher(comparator=FrameworkComparator())
lfp = self.get_structure("LiFePO4")
nfp = self.get_structure("NaFePO4")
self.assertTrue(sm2.fit(lfp, nfp))
self.assertFalse(sm.fit(lfp, nfp))
# Test anonymous fit.
self.assertEqual(sm.fit_anonymous(lfp, nfp), True)
self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0], 0.060895871160262717)
# Test partial occupancies.
s1 = Structure(
Lattice.cubic(3),
[{"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
s2 = Structure(
Lattice.cubic(3),
[{"Fe": 0.25}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.75}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertFalse(sm.fit(s1, s2))
self.assertFalse(sm.fit(s2, s1))
s2 = Structure(
Lattice.cubic(3),
[{"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertEqual(sm.fit_anonymous(s1, s2), True)
self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)
# test symmetric
sm_coarse = sm = StructureMatcher(
comparator=ElementComparator(),
ltol=0.6,
stol=0.6,
angle_tol=6,
)
s1 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s1.vasp")
s2 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s2.vasp")
self.assertEqual(sm_coarse.fit(s1, s2), True)
self.assertEqual(sm_coarse.fit(s2, s1), False)
self.assertEqual(sm_coarse.fit(s1, s2, symmetric=True), False)
self.assertEqual(sm_coarse.fit(s2, s1, symmetric=True), False)
def test_oxi(self):
"""Test oxidation state removal matching"""
sm = StructureMatcher()
self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
sm = StructureMatcher(comparator=ElementComparator())
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
def test_primitive(self):
"""Test primitive cell reduction"""
sm = StructureMatcher(primitive_cell=True)
self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
def test_class(self):
# Tests entire class as single working unit
sm = StructureMatcher()
# Test group_structures and find_indices
out = sm.group_structures(self.struct_list)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
self.assertEqual(sum(map(len, out)), len(self.struct_list))
for s in self.struct_list[::2]:
s.replace_species({"Ti": "Zr", "O": "Ti"})
out = sm.group_structures(self.struct_list, anonymous=True)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
def test_mix(self):
structures = [
self.get_structure("Li2O"),
self.get_structure("Li2O2"),
self.get_structure("LiFePO4"),
]
for fname in ["POSCAR.Li2O", "POSCAR.LiFePO4"]:
structures.append(Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, fname)))
sm = StructureMatcher(comparator=ElementComparator())
groups = sm.group_structures(structures)
for g in groups:
formula = g[0].composition.reduced_formula
if formula in ["Li2O", "LiFePO4"]:
self.assertEqual(len(g), 2)
else:
self.assertEqual(len(g), 1)
def test_left_handed_lattice(self):
"""Ensure Left handed lattices are accepted"""
sm = StructureMatcher()
s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li3GaPCO7.json"))
self.assertTrue(sm.fit(s, s))
def test_as_dict_and_from_dict(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.2,
angle_tol=2,
primitive_cell=False,
scale=False,
comparator=FrameworkComparator(),
)
d = sm.as_dict()
sm2 = StructureMatcher.from_dict(d)
self.assertEqual(sm2.as_dict(), d)
def test_no_scaling(self):
sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2, scale=False, comparator=ElementComparator())
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
self.assertTrue(sm.get_rms_dist(self.struct_list[0], self.struct_list[1])[0] < 0.0008)
def test_supercell_fit(self):
sm = StructureMatcher(attempt_supercell=False)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9_distorted.json"))
self.assertFalse(sm.fit(s1, s2))
sm = StructureMatcher(attempt_supercell=True)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
def test_get_lattices(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l1 = Lattice.from_parameters(1, 2.1, 1.9, 90, 89, 91)
l2 = Lattice.from_parameters(1.1, 2, 2, 89, 91, 90)
s1 = Structure(l1, [], [])
s2 = Structure(l2, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))
self.assertEqual(len(lattices), 16)
l3 = Lattice.from_parameters(1.1, 2, 20, 89, 91, 90)
s3 = Structure(l3, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))
self.assertEqual(len(lattices), 0)
def test_find_match1(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [0.7, 0.5, 0.375]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=True, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
fc = s2.frac_coords + match[3]
fc -= np.round(fc)
self.assertAlmostEqual(np.sum(fc), 0.9)
self.assertAlmostEqual(np.sum(fc[:, :2]), 0.1)
cart_dist = np.sum(match[1] * (l.volume / 3) ** (1 / 3))
self.assertAlmostEqual(cart_dist, 0.15)
def test_find_match2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si"], [[0, 0, 0.1], [0, 0, 0.2]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [0, 0.1, -0.95]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=False, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
s2.translate_sites(range(len(s2)), match[3])
self.assertAlmostEqual(np.sum(s2.frac_coords) % 1, 0.3)
self.assertAlmostEqual(np.sum(s2.frac_coords[:, :2]) % 1, 0)
def test_supercell_subsets(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="volume",
)
sm_no_s = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [0, 2, 1, 3, 4, 5]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test when s1 is exact supercell of s2
result = sm.get_s2_like_s1(s1, s2)
for a, b in zip(s1, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
self.assertTrue(sm_no_s.fit(s1, s2))
self.assertTrue(sm_no_s.fit(s2, s1))
rms = (0.048604032430991401, 0.059527539448807391)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))
# test when the supercell is a subset of s2
subset_supercell = s1.copy()
del subset_supercell[0]
result = sm.get_s2_like_s1(subset_supercell, s2)
self.assertEqual(len(result), 6)
for a, b in zip(subset_supercell, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(subset_supercell, s2))
self.assertTrue(sm.fit(s2, subset_supercell))
self.assertFalse(sm_no_s.fit(subset_supercell, s2))
self.assertFalse(sm_no_s.fit(s2, subset_supercell))
rms = (0.053243049896333279, 0.059527539448807336)
self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))
# test when s2 (once made a supercell) is a subset of s1
s2_missing_site = s2.copy()
del s2_missing_site[1]
result = sm.get_s2_like_s1(s1, s2_missing_site)
for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2_missing_site))
self.assertTrue(sm.fit(s2_missing_site, s1))
self.assertFalse(sm_no_s.fit(s1, s2_missing_site))
self.assertFalse(sm_no_s.fit(s2_missing_site, s1))
rms = (0.029763769724403633, 0.029763769724403987)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))
def test_get_s2_large_s2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=False,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)
s2 = Structure(l2, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
s2.make_supercell([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
result = sm.get_s2_like_s1(s1, s2)
for x, y in zip(s1, result):
self.assertLess(x.distance(y), 0.08)
def test_get_mapping(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [2, 0, 1, 3, 5, 4]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test the mapping
s2.make_supercell([2, 1, 1])
# equal sizes
for i, x in enumerate(sm.get_mapping(s1, s2)):
self.assertEqual(s1[x].species, s2[i].species)
del s1[0]
# s1 is subset of s2
for i, x in enumerate(sm.get_mapping(s2, s1)):
self.assertEqual(s1[i].species, s2[x].species)
# s2 is smaller than s1
del s2[0]
del s2[1]
self.assertRaises(ValueError, sm.get_mapping, s2, s1)
def test_get_supercell_matrix(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-2, 0, 0], [0, 1, 0], [0, 0, 1]]).all())
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([[1, -1, 0], [0, 0, -1], [0, 1, 0]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
# test when the supercell is a subset
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
)
del s1[0]
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
def test_subset(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(10, 20, 30)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
mindists = np.min(s1.lattice.get_all_distances(s1.frac_coords, result.frac_coords), axis=0)
self.assertLess(np.max(mindists), 1e-6)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
        # test with not enough sites in s1
s1 = Structure(l, ["Si", "Ag", "Cl"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
self.assertEqual(sm.get_s2_like_s1(s1, s2), None)
def test_out_of_cell_s2_like_s1(self):
l = Lattice.cubic(5)
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, -0.02], [0, 0, 0.001], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.98], [0, 0, 0.99], [0.7, 0.4, 0.5]])
new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)
dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2, axis=-1) ** 0.5
self.assertLess(np.max(dists), 0.1)
def test_disordered_primitive_to_ordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.75, 0.5, 0.5]]
prim = Structure(lp, [{"Na": 0.5}, {"Cl": 0.5}], pcoords)
supercell = Structure(ls, ["Na", "Cl"], scoords)
supercell.make_supercell([[-1, 1, 0], [0, 1, 1], [1, 0, 0]])
self.assertFalse(sm_sites.fit(prim, supercell))
self.assertTrue(sm_atoms.fit(prim, supercell))
self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)
self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)
def test_ordered_primitive_to_disordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.5, 0, 0], [0.25, 0.5, 0.5], [0.75, 0.5, 0.5]]
s1 = Structure(lp, ["Na", "Cl"], pcoords)
s2 = Structure(ls, [{"Na": 0.5}, {"Na": 0.5}, {"Cl": 0.5}, {"Cl": 0.5}], scoords)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_atoms.fit(s1, s2))
def test_disordered_to_disordered(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Cl": 0.5}], coords)
s2 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Br": 0.5}], coords)
self.assertFalse(sm_atoms.fit(s1, s2))
def test_occupancy_comparator(self):
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.6, "K": 0.4}, "Cl"], pcoords)
s2 = Structure(lp, [{"Xa": 0.4, "Xb": 0.6}, "Cl"], pcoords)
s3 = Structure(lp, [{"Xa": 0.5, "Xb": 0.5}, "Cl"], pcoords)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OccupancyComparator(),
)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_sites.fit(s1, s3))
def test_electronegativity(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PAsO4S4.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PNO4Se4.json"))
self.assertEqual(
sm.get_best_electronegativity_anonymous_mapping(s1, s2),
{
Element("S"): Element("Se"),
Element("As"): Element("N"),
Element("Fe"): Element("Fe"),
Element("Na"): Element("Na"),
Element("P"): Element("P"),
Element("O"): Element("O"),
},
)
self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)
# test include_dist
dists = {Element("N"): 0, Element("P"): 0.0010725064}
for mapping, d in sm.get_all_anonymous_mappings(s1, s2, include_dist=True):
self.assertAlmostEqual(dists[mapping[Element("As")]], d)
def test_rms_vs_minimax(self):
# This tests that structures with adjusted RMS less than stol, but minimax
# greater than stol are treated properly
# stol=0.3 gives exactly an ftol of 0.1 on the c axis
sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1, primitive_cell=False)
l = Lattice.orthorhombic(1, 2, 12)
sp = ["Si", "Si", "Al"]
s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])
s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])
self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2), (0.32 ** 0.5 / 2, 0.4))
self.assertEqual(sm.fit(s1, s2), False)
self.assertEqual(sm.fit_anonymous(s1, s2), False)
self.assertEqual(sm.get_mapping(s1, s2), None)
class PointDefectComparatorTest(PymatgenTest):
def test_defect_matching(self):
# SETUP DEFECTS FOR TESTING
# symmorphic defect test set
s_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CsSnI3.cif")) # tetragonal CsSnI3
identical_Cs_vacs = [Vacancy(s_struc, s_struc[0]), Vacancy(s_struc, s_struc[1])]
identical_I_vacs_sublattice1 = [
Vacancy(s_struc, s_struc[4]),
Vacancy(s_struc, s_struc[5]),
Vacancy(s_struc, s_struc[8]),
Vacancy(s_struc, s_struc[9]),
] # in plane halides
identical_I_vacs_sublattice2 = [
Vacancy(s_struc, s_struc[6]),
Vacancy(s_struc, s_struc[7]),
] # out of plane halides
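        # default comparator ignores charge state but does not match defects across different supercells or scaled lattices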
pdc = PointDefectComparator()
# NOW TEST DEFECTS
# test vacancy matching
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[0])) # trivial vacancy test
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[1])) # vacancies on same sublattice
for i, j in itertools.combinations(range(4), 2):
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice1[i], identical_I_vacs_sublattice1[j]))
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice2[0], identical_I_vacs_sublattice2[1]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# both vacancies, but different specie types
identical_I_vacs_sublattice1[0],
)
)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same specie type, different sublattice
identical_I_vacs_sublattice2[0],
)
)
# test substitutional matching
sub_Cs_on_I_sublattice1_set1 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[0].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice1_set2 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[1].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice2 = PeriodicSite("Cs", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
sub_Rb_on_I_sublattice2 = PeriodicSite("Rb", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial substitution test
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong specie)
Substitution(s_struc, sub_Cs_on_I_sublattice2),
Substitution(s_struc, sub_Rb_on_I_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong sublattice)
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice2),
)
)
# test symmorphic interstitial matching
        # (using set generated from Voronoi generator, with same sublattice given by saturated_
# interstitial_structure function)
inter_H_sublattice1_set1 = PeriodicSite("H", [0.0, 0.75, 0.25], s_struc.lattice)
inter_H_sublattice1_set2 = PeriodicSite("H", [0.0, 0.75, 0.75], s_struc.lattice)
inter_H_sublattice2 = PeriodicSite("H", [0.57796112, 0.06923687, 0.56923687], s_struc.lattice)
inter_H_sublattice3 = PeriodicSite("H", [0.25, 0.25, 0.54018268], s_struc.lattice)
inter_He_sublattice3 = PeriodicSite("He", [0.25, 0.25, 0.54018268], s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice3),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(s_struc, inter_H_sublattice3),
Interstitial(s_struc, inter_He_sublattice3),
)
)
# test non-symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by
        # saturated_interstitial_structure function)
ns_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CuCl.cif"))
ns_inter_H_sublattice1_set1 = PeriodicSite("H", [0.06924513, 0.06308959, 0.86766528], ns_struc.lattice)
ns_inter_H_sublattice1_set2 = PeriodicSite("H", [0.43691041, 0.36766528, 0.06924513], ns_struc.lattice)
ns_inter_H_sublattice2 = PeriodicSite("H", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
ns_inter_He_sublattice2 = PeriodicSite("He", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# different interstitials (wrong sublattice)
Interstitial(ns_struc, ns_inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(ns_struc, ns_inter_H_sublattice2),
Interstitial(ns_struc, ns_inter_He_sublattice2),
)
)
# test influence of charge on defect matching (default is to be charge agnostic)
vac_diff_chg = identical_Cs_vacs[0].copy()
vac_diff_chg.set_charge(3.0)
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
chargecheck_pdc = PointDefectComparator(check_charge=True) # switch to PDC which cares about charge state
self.assertFalse(chargecheck_pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
# test different supercell size
# (comparing same defect but different supercells - default is to not check for this)
sc_agnostic_pdc = PointDefectComparator(check_primitive_cell=True)
sc_scaled_s_struc = s_struc.copy()
sc_scaled_s_struc.make_supercell([2, 2, 3])
sc_scaled_I_vac_sublatt1_ps1 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[0].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_ps2 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt2_ps = PeriodicSite(
"I",
identical_I_vacs_sublattice2[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_defect1 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps1)
sc_scaled_I_vac_sublatt1_defect2 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps2)
sc_scaled_I_vac_sublatt2_defect = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt2_ps)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect site but between different supercells
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[1],
# same coords, different lattice structure
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[1], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same sublattice, different coords
sc_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
sc_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defects (wrong sublattice)
sc_scaled_I_vac_sublatt2_defect,
)
)
# test same structure size, but scaled lattice volume
# (default is to not allow these to be equal, but check_lattice_scale=True allows for this)
vol_agnostic_pdc = PointDefectComparator(check_lattice_scale=True)
vol_scaled_s_struc = s_struc.copy()
vol_scaled_s_struc.scale_lattice(s_struc.volume * 0.95)
vol_scaled_I_vac_sublatt1_defect1 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[4])
vol_scaled_I_vac_sublatt1_defect2 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[5])
vol_scaled_I_vac_sublatt2_defect = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[6])
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect (but vol change)
vol_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same defect, different sublattice point (and vol change)
vol_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
vol_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defect (wrong sublattice)
vol_scaled_I_vac_sublatt2_defect,
)
)
# test identical defect which has had entire lattice shifted
shift_s_struc = s_struc.copy()
shift_s_struc.translate_sites(range(len(s_struc)), [0.2, 0.3, 0.4], frac_coords=True, to_unit_cell=True)
shifted_identical_Cs_vacs = [
Vacancy(shift_s_struc, shift_s_struc[0]),
Vacancy(shift_s_struc, shift_s_struc[1]),
]
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but shifted)
shifted_identical_Cs_vacs[0],
)
)
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and shifted)
shifted_identical_Cs_vacs[1],
)
)
# test uniform lattice shift within non-symmorphic structure
shift_ns_struc = ns_struc.copy()
shift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
shift_ns_inter_H_sublattice1_set1 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set1.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
shift_ns_inter_H_sublattice1_set2 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set2.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# trivially same defect (but shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# same defect on different sublattice point (and shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set2),
)
)
# test a rotational + supercell type structure transformation (requires check_primitive_cell=True)
rotated_s_struc = s_struc.copy()
rotated_s_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
rotated_identical_Cs_vacs = [
Vacancy(rotated_s_struc, rotated_s_struc[0]),
Vacancy(rotated_s_struc, rotated_s_struc[1]),
]
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but rotated)
rotated_identical_Cs_vacs[0],
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_Cs_vacs[0], rotated_identical_Cs_vacs[0]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice (and rotated)
rotated_identical_Cs_vacs[1],
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and rotated)
rotated_identical_Cs_vacs[1],
)
)
# test a rotational + supercell + shift type structure transformation for non-symmorphic structure
rotANDshift_ns_struc = ns_struc.copy()
rotANDshift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
rotANDshift_ns_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
ns_vac_Cs_set1 = Vacancy(ns_struc, ns_struc[0])
rotANDshift_ns_vac_Cs_set1 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[0])
rotANDshift_ns_vac_Cs_set2 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[1])
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# trivially same defect (but rotated and sublattice shifted)
rotANDshift_ns_vac_Cs_set1,
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# same defect on different sublattice point (shifted and rotated)
rotANDshift_ns_vac_Cs_set2,
)
)
if __name__ == "__main__":
unittest.main()
|
richardtran415/pymatgen
|
pymatgen/analysis/tests/test_structure_matcher.py
|
Python
|
mit
| 47,994
|
[
"VASP",
"pymatgen"
] |
6d1df3dd4700664e72f33e7c65c958c33cb2517bdb831e4b5e6a54fc28a8ad2c
|
# coding=utf-8
import vtk
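# Read a DICOM series from the directory below; the path and voxel spacing are specific to the original author's dataset.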
dicom_image_reader = vtk.vtkDICOMImageReader()
dicom_image_reader.SetDirectoryName("D:\\CodeProject\\Hover\\Data\\Dicom\\02ef8f31ea86a45cfce6eb297c274598\\series-000001\\")
dicom_image_reader.SetDataByteOrderToLittleEndian()
dicom_image_reader.SetDataSpacing(3.2, 3.2, 1.5)
dicom_image_reader.Update()
print(dicom_image_reader.GetDataSpacing())
print(dicom_image_reader.GetImagePositionPatient())
print(dicom_image_reader.GetImageOrientationPatient())
print("H:" + str(dicom_image_reader.GetHeight()) + "W:" + str(dicom_image_reader.GetWidth()))
reader_image_cast = vtk.vtkImageCast()
reader_image_cast.SetInputConnection(dicom_image_reader.GetOutputPort())
reader_image_cast.SetOutputScalarTypeToUnsignedShort()
reader_image_cast.Update()
|
comedate/VolumeRendering
|
render_reader.py
|
Python
|
mit
| 797
|
[
"VTK"
] |
941b01964511e2a863876b11575b35858a9de1414bed0e8a0c6e45aaf3d0620b
|
import vtk
rectGridReader = vtk.vtkRectilinearGridReader()
rectGridReader.SetFileName("D:/Notebooks_Bogota2017/SS_2017/data/jet4_0.500.vtk")
rectGridReader.Update()
#------------ CHALLENGE ONE ----------------------
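# Extract the grid outline and a single j = 0 slice of the grid geometry (drawn as a red wireframe).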
rectGridOutline = vtk.vtkRectilinearGridOutlineFilter()
rectGridOutline.SetInputData(rectGridReader.GetOutput())
rectGridGeom = vtk.vtkRectilinearGridGeometryFilter()
rectGridGeom.SetInputData(rectGridReader.GetOutput())
rectGridGeom.SetExtent(0, 128, 0, 0, 0, 128)
rectGridOutlineMapper = vtk.vtkPolyDataMapper()
rectGridOutlineMapper.SetInputConnection(rectGridOutline.GetOutputPort())
rectGridGeomMapper = vtk.vtkPolyDataMapper()
rectGridGeomMapper.SetInputConnection(rectGridGeom.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(rectGridOutlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
gridGeomActor = vtk.vtkActor()
gridGeomActor.SetMapper(rectGridGeomMapper)
gridGeomActor.GetProperty().SetRepresentationToWireframe()
gridGeomActor.GetProperty().SetColor(1, 0, 0)
#------------ CHALLENGE TWO ----------------------
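# Add a point-data array 'magnitude' holding the magnitude of the 'vectors' array via vtkArrayCalculator.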
magnitudeCalcFilter = vtk.vtkArrayCalculator()
magnitudeCalcFilter.SetInputConnection(rectGridReader.GetOutputPort())
magnitudeCalcFilter.AddVectorArrayName('vectors')
magnitudeCalcFilter.SetResultArrayName('magnitude')
magnitudeCalcFilter.SetFunction("mag(vectors)")
magnitudeCalcFilter.Update()
#------------ CHALLENGE THREE ----------------------
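# Copy the points and computed magnitudes into an unstructured grid, then render a random subset of its points as vertex glyphs.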
points = vtk.vtkPoints()
grid = magnitudeCalcFilter.GetOutput()
grid.GetPoints(points)
scalars = grid.GetPointData().GetArray("magnitude")
ugrid = vtk.vtkUnstructuredGrid()
ugrid.SetPoints(points)
ugrid.GetPointData().SetScalars(scalars)
for i in range (0, grid.GetNumberOfCells()):
cell = grid.GetCell(i)
ugrid.InsertNextCell(cell.GetCellType(), cell.GetPointIds())
subset = vtk.vtkMaskPoints()
subset.SetOnRatio(50)
subset.RandomModeOn()
subset.SetInputData(ugrid)
pointsGlyph = vtk.vtkVertexGlyphFilter()
pointsGlyph.SetInputConnection(subset.GetOutputPort())
#pointsGlyph.SetInputData(ugrid)
pointsGlyph.Update()
pointsMapper = vtk.vtkPolyDataMapper()
pointsMapper.SetInputConnection(pointsGlyph.GetOutputPort())
pointsMapper.SetScalarModeToUsePointData()
pointsActor = vtk.vtkActor()
pointsActor.SetMapper(pointsMapper)
#------------ CHALLENGE FOUR ----------------------
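# Generate 10 isosurfaces across the magnitude range and render them with 50% opacity.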
scalarRange = ugrid.GetPointData().GetScalars().GetRange()
print(scalarRange)
isoFilter = vtk.vtkContourFilter()
isoFilter.SetInputData(ugrid)
isoFilter.GenerateValues(10, scalarRange)
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(isoFilter.GetOutputPort())
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetOpacity(0.5)
#------------ CHALLENGE FIVE ----------------------
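# Subsample the grid and draw hedgehog line glyphs along the vectors, colored by vector magnitude through the lookup table.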
subset = vtk.vtkMaskPoints()
subset.SetOnRatio(10)
subset.RandomModeOn()
subset.SetInputConnection(rectGridReader.GetOutputPort())
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
lut.SetHueRange(0.667, 0.0)
lut.SetVectorModeToMagnitude()
lut.Build()
hh = vtk.vtkHedgeHog()
hh.SetInputConnection(subset.GetOutputPort())
hh.SetScaleFactor(0.001)
hhm = vtk.vtkPolyDataMapper()
hhm.SetInputConnection(hh.GetOutputPort())
hhm.SetLookupTable(lut)
hhm.SetScalarVisibility(True)
hhm.SetScalarModeToUsePointFieldData()
hhm.SelectColorArray('vectors')
hhm.SetScalarRange((rectGridReader.GetOutput().GetPointData().GetVectors().GetRange(-1)))
hha = vtk.vtkActor()
hha.SetMapper(hhm)
#------------ RENDERER, RENDER WINDOW, AND INTERACTOR ----------------------
#Option 1: Default vtk render window
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.5, 0.5, 0.5)
#renderer.AddActor(outlineActor)
#renderer.AddActor(gridGeomActor)
#renderer.AddActor(pointsActor)
#renderer.AddActor(isoActor)
renderer.AddActor(hha)
renderer.ResetCamera()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(500, 500)
renderWindow.Render()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renderWindow)
iren.Start()
|
dianafprieto/SS_2017
|
scripts/05_NB_Challenge_1.py
|
Python
|
mit
| 4,001
|
[
"VTK"
] |
448c569d552f9bf8b1c8442d8be46c4d07f9047c1267edbabd511c0ef030f86a
|
#!/usr/bin/python2
import optparse
import os
import subprocess
import sys
def num_cpus():
# Use multiprocessing module, available in Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
pass
# Get POSIX system config value for number of processors.
posix_num_cpus = os.sysconf("SC_NPROCESSORS_ONLN")
if posix_num_cpus != -1:
return posix_num_cpus
# Guess
return 2
def yum_install(pkg):
return subprocess.call(["yum", "install", "-y", pkg])
def install_system_deps():
status = subprocess.call(["yum", "--exclude=systemtap",
"groupinstall", "-y", "Development tools"])
if status:
return status
status = subprocess.call(["yum", "install", "-y",
"ed",
"readline-devel",
"zlib-devel",
"curl-devel",
"bzip2-devel",
"python-devel",
"apr-devel",
"libevent-devel",
"openssl-libs",
"openssl-devel",
"libyaml",
"libyaml-devel",
"epel-release",
"htop",
"perl-Env",
"perl-ExtUtils-Embed",
"libxml2-devel",
"libxslt-devel"])
if status:
return status
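    # Bootstrap pip with get-pip.py, then use it to install additional Python packages.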
status = subprocess.call(["curl", "https://bootstrap.pypa.io/get-pip.py", "-o", "get-pip.py"])
if status:
return status
status = subprocess.call(["python", "get-pip.py"])
if status:
return status
status = subprocess.call(["pip", "install", "psutil", "lockfile", "paramiko", "setuptools",
"epydoc"])
return status
def install_dependency(dependency_name):
return subprocess.call(
["tar",
"-xzf",
dependency_name + "/" + dependency_name + ".tar.gz",
"-C",
"/usr/local"])
def configure():
return subprocess.call(["./configure",
"--enable-orca",
"--enable-mapreduce",
"--with-perl",
"--with-libxml",
"--with-python",
"--prefix=/usr/local/gpdb"], cwd="gpdb_src")
def make():
return subprocess.call(["make",
"-j" + str(num_cpus())], cwd="gpdb_src")
def install(output_dir):
subprocess.call(["make", "install"], cwd="gpdb_src")
subprocess.call("mkdir -p " + output_dir, shell=True)
return subprocess.call("cp -r /usr/local/gpdb/* " + output_dir, shell=True)
def main():
parser = optparse.OptionParser()
parser.add_option("--build_type", dest="build_type", default="RELEASE")
parser.add_option("--compiler", dest="compiler")
parser.add_option("--cxxflags", dest="cxxflags")
parser.add_option("--output_dir", dest="output_dir", default="install")
(options, args) = parser.parse_args()
status = install_system_deps()
if status:
return status
for dependency in args:
status = install_dependency(dependency)
if status:
return status
status = configure()
if status:
return status
status = make()
if status:
return status
status = install(options.output_dir)
if status:
return status
return 0
if __name__ == "__main__":
sys.exit(main())
|
atris/gpdb
|
concourse/scripts/build_with_orca.py
|
Python
|
apache-2.0
| 3,771
|
[
"ORCA"
] |
0fde3f34013a81c483bab3aedfba9b6c227b72bcdc625cdd7c78cc2b3baf77b6
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Paraview(CMakePackage):
"""ParaView is an open-source, multi-platform data analysis and
visualization application."""
homepage = 'http://www.paraview.org'
url = "http://www.paraview.org/files/v5.3/ParaView-v5.3.0.tar.gz"
_urlfmt = 'http://www.paraview.org/files/v{0}/ParaView-v{1}{2}.tar.gz'
version('5.4.1', '4030c70477ec5a85aa72d6fc86a30753')
version('5.4.0', 'b92847605bac9036414b644f33cb7163')
version('5.3.0', '68fbbbe733aa607ec13d1db1ab5eba71')
version('5.2.0', '4570d1a2a183026adb65b73c7125b8b0')
version('5.1.2', '44fb32fc8988fcdfbc216c9e40c3e925')
version('5.0.1', 'fdf206113369746e2276b95b257d2c9b')
version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378')
variant('plugins', default=True,
description='Install include files for plugins support')
variant('python', default=False, description='Enable Python support')
variant('mpi', default=True, description='Enable MPI support')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('qt', default=False, description='Enable Qt (gui) support')
variant('opengl2', default=True, description='Enable OpenGL2 backend')
depends_on('python@2:2.8', when='+python')
depends_on('py-numpy', when='+python', type='run')
depends_on('py-matplotlib', when='+python', type='run')
depends_on('mpi', when='+mpi')
depends_on('qt', when='@5.3.0:+qt')
depends_on('qt@:4', when='@:5.2.0+qt')
depends_on('mesa+swrender', when='+osmesa')
conflicts('+qt', when='+osmesa')
depends_on('bzip2')
depends_on('freetype')
# depends_on('hdf5+mpi', when='+mpi')
# depends_on('hdf5~mpi', when='~mpi')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
# depends_on('netcdf')
# depends_on('netcdf-cxx')
# depends_on('protobuf') # version mismatches?
# depends_on('sqlite') # external version not supported
depends_on('zlib')
depends_on('cmake@3.3:', type='build')
patch('stl-reader-pv440.patch', when='@4.4.0')
# Broken gcc-detection - improved in 5.1.0, redundant later
patch('gcc-compiler-pv501.patch', when='@:5.0.1')
# Broken installation (ui_pqExportStateWizard.h) - fixed in 5.2.0
patch('ui_pqExportStateWizard.patch', when='@:5.1.2')
def url_for_version(self, version):
"""Handle ParaView version-based custom URLs."""
if version < Version('5.1.0'):
return self._urlfmt.format(version.up_to(2), version, '-source')
else:
return self._urlfmt.format(version.up_to(2), version, '')
def setup_environment(self, spack_env, run_env):
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
paraview_version = 'paraview-%s' % self.spec.version.up_to(2)
run_env.prepend_path('LIBRARY_PATH', join_path(lib_dir,
paraview_version))
run_env.prepend_path('LD_LIBRARY_PATH', join_path(lib_dir,
paraview_version))
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
spec = self.spec
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
def nvariant_bool(feature):
"""Negated ternary for spec variant to OFF/ON string"""
return variant_bool(feature, on='OFF', off='ON')
rendering = variant_bool('+opengl2', 'OpenGL2', 'OpenGL')
includes = variant_bool('+plugins')
cmake_args = [
'-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % variant_bool('+qt'),
'-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % variant_bool('+osmesa'),
'-DVTK_USE_X:BOOL=%s' % nvariant_bool('+osmesa'),
'-DVTK_RENDERING_BACKEND:STRING=%s' % rendering,
'-DPARAVIEW_INSTALL_DEVELOPMENT_FILES:BOOL=%s' % includes,
'-DBUILD_TESTING:BOOL=OFF',
'-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
'-DVTK_USE_SYSTEM_HDF5:BOOL=OFF',
'-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
'-DVTK_USE_SYSTEM_NETCDF:BOOL=OFF',
'-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
'-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
]
# The assumed qt version changed to QT5 (as of paraview 5.2.1),
# so explicitly specify which QT major version is actually being used
if '+qt' in spec:
cmake_args.extend([
'-DPARAVIEW_QT_VERSION=%s' % spec['qt'].version[0],
])
if '+python' in spec:
cmake_args.extend([
'-DPARAVIEW_ENABLE_PYTHON:BOOL=ON',
'-DPYTHON_EXECUTABLE:FILEPATH=%s' % spec['python'].command.path
])
if '+mpi' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_MPI:BOOL=ON',
'-DMPIEXEC:FILEPATH=%s/bin/mpiexec' % spec['mpi'].prefix
])
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON',
])
# Hide git from Paraview so it will not use `git describe`
# to find its own version number
if spec.satisfies('@5.4.0:5.4.1'):
cmake_args.extend([
'-DGIT_EXECUTABLE=FALSE'
])
return cmake_args
|
lgarren/spack
|
var/spack/repos/builtin/packages/paraview/package.py
|
Python
|
lgpl-2.1
| 6,854
|
[
"NetCDF",
"ParaView"
] |
d35e8e32d132bd994da1ac00839882b7414de0bfaf001cbc742fe8439473dab8
|
#!/usr/bin/python
import HTSeq
from Bio.Seq import Seq
import os.path
import argparse
def reverseComplement(strDNA):
basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
strDNArevC = ''
for l in strDNA:
strDNArevC += basecomplement[l]
return strDNArevC[::-1]
def translateSeq(DNASeq,transTable):
seq=DNASeq
tableid=transTable
reversedSeq=False
try:
myseq= Seq(seq)
protseq=Seq.translate(myseq, table=tableid,cds=True)
except:
reversedSeq=True
try:
seq=reverseComplement(seq)
myseq= Seq(seq)
protseq=Seq.translate(myseq, table=tableid,cds=True)
except:
try:
seq=seq[::-1]
myseq= Seq(seq)
protseq=Seq.translate(myseq, table=tableid,cds=True)
except:
reversedSeq=False
try:
seq=seq[::-1]
seq=reverseComplement(seq)
myseq= Seq(seq)
protseq=Seq.translate(myseq, table=tableid,cds=True)
except Exception as e:
raise ValueError(e)
return protseq,seq,reversedSeq
def main():
	parser = argparse.ArgumentParser(description="This program checks whether the alleles in a list of gene FASTA files are complete coding sequences (start codon, no internal stop codons, length a multiple of 3)")
parser.add_argument('-i', nargs='?', type=str, help='list genes', required=True)
parser.add_argument('-r', nargs='?', type=bool, help='Return values', required=False)
args=parser.parse_args()
genes = args.i
try:
ReturnValues=bool(args.r)
except:
ReturnValues=False
pass
	# analyzeCDS also expects a translation table id; NCBI table 11 (bacterial) is assumed here as a default
	analyzeCDS(genes, 11, ReturnValues)
def analyzeCDS(genes,transTable,ReturnValues):
gene_fp = open( genes, 'r')
stopc=0
notStart=0
notMultiple=0
totalalleles=0
statsPerGene={}
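	# For each gene FASTA file, attempt to translate every allele and record which alleles fail each CDS check.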
for gene in gene_fp:
listStopc=[]
listnotStart=[]
listnotMultiple=[]
print "####################"
print str(os.path.basename(gene))
k=0
gene = gene.rstrip('\n')
multiple=True
gene_fp2 = HTSeq.FastaReader(gene)
# translate each allele and report the error if unable to translate
for allele in gene_fp2:
k+=1
# if allele is not multiple of 3 it's useless to try to translate
if (len(allele.seq) % 3 != 0):
multiple=False
listnotMultiple.append(str(k))
print "allele "+str(k)+" is not multiple of 3"
pass
else:
try:
protseq,seq,reversedSeq=translateSeq(allele.seq, transTable)
except Exception, err:
if "Extra in frame stop codon found" in str(err):
stopc+=1
listStopc.append(str(k))
elif "is not a start codon" in str(err):
notStart+=1
listnotStart.append(str(k))
else:
print err
print "allele "+str(k)+" is not translating"
pass
statsPerGene[gene]=listnotMultiple,listStopc,listnotStart,k
totalalleles+=k
print str(stopc) + " alleles have stop codons inside"
print str(notStart) + " alleles don't have start codons"
print "total of alleles : " + str(totalalleles)
if not ReturnValues:
with open("CheckCDSResults.txt", "wb") as f:
f.write("Alleles with stop codons inside: \n")
for item in listStopc:
f.write(item)
f.write("\n")
f.write("\nAlleles without start codon: \n")
for item in listnotStart:
f.write(item)
f.write("\n")
else:
return statsPerGene
if __name__ == "__main__":
main()
|
mickaelsilva/pythonscripts
|
SchemaValidation/CheckCDS.py
|
Python
|
gpl-2.0
| 3,219
|
[
"HTSeq"
] |
ab1c554f167791e2e307ca6a50b4651599f47fd67f7c694715f0872fad23b6ed
|