# NOTE: extraction artifact removed — this file is a concatenation of several
# Cheetah source modules (_SkeletonPage.py, SkeletonPage.py, CacheRegion.py,
# CheetahWrapper.py).
# $Id: _SkeletonPage.py,v 1.13 2002/10/01 17:52:02 tavis_rudd Exp $
"""A baseclass for the SkeletonPage template
Meta-Data
==========
Author: Tavis Rudd <tavis@damnsimple.com>,
Version: $Revision: 1.13 $
Start Date: 2001/04/05
Last Revision Date: $Date: 2002/10/01 17:52:02 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.13 $"[11:-2]
##################################################
## DEPENDENCIES ##
import time, types, os, sys
# intra-package imports ...
from Cheetah.Template import Template
##################################################
## GLOBALS AND CONSTANTS ##
True = (1==1)
False = (0==1)
##################################################
## CLASSES ##
class _SkeletonPage(Template):
    """A baseclass for the SkeletonPage template.

    Holds page-level metadata (meta tags, stylesheets, javascript libs and
    code, body-tag attributes) and provides helper methods that render each
    of them as HTML fragments.
    """

    # Page-level defaults; subclasses normally override these attributes.
    docType = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" ' + \
              '"http://www.w3.org/TR/html4/loose.dtd">'
    # docType = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" ' + \
    #           '"http://www.w3.org/TR/xhtml1l/DTD/transitional.dtd">'
    title = ''
    siteDomainName = 'www.example.com'
    siteCredits = 'Designed & Implemented by Tavis Rudd'
    siteCopyrightName = "Tavis Rudd"
    htmlTag = '<html>'

    def __init__(self, *args, **KWs):
        Template.__init__(self, *args, **KWs)
        # Meta tags are grouped by attribute kind: HTTP-EQUIV vs NAME.
        self._metaTags = {'HTTP-EQUIV': {'keywords': 'Cheetah',
                                         'Content-Type': 'text/html; charset=iso-8859-1',
                                         },
                          'NAME': {'generator': 'Cheetah: The Python-Powered Template Engine'}
                          }
        # stylesheets = {'.cssClassName': {cssAttribute: value, ...}}
        self._stylesheets = {}
        # stylesheetsOrder lists keys of _stylesheets in output order.
        self._stylesheetsOrder = []
        # stylesheetLibs = {'libName': 'libSrcPath'}
        self._stylesheetLibs = {}
        # javascriptLibs = {'libName': 'libSrcPath'}
        self._javascriptLibs = {}
        # javascriptTags = {key: codeString or (jsVersion, codeString)}
        self._javascriptTags = {}
        # Extra attributes for the <body> tag, e.g. {'onload': '...'}
        self._bodyTagAttribs = {}

    def metaTags(self):
        """Return a formatted version of the self._metaTags dictionary, using the
        formatMetaTags function from Cheetah.Macros.HTML"""
        return self.formatMetaTags(self._metaTags)

    def stylesheetTags(self):
        """Return a formatted version of the self._stylesheetLibs and
        self._stylesheets dictionaries. The keys in self._stylesheets must
        be listed in the order that they should appear in the list
        self._stylesheetsOrder, to ensure that the style rules are defined in
        the correct order."""
        stylesheetTagsTxt = ''
        for title, src in self._stylesheetLibs.items():
            stylesheetTagsTxt += '<link rel="stylesheet" type="text/css" href="' + str(src) + '" />\n'
        if not self._stylesheetsOrder:
            return stylesheetTagsTxt
        stylesheetTagsTxt += '<style type="text/css"><!--\n'
        for identifier in self._stylesheetsOrder:
            if identifier not in self._stylesheets:
                # bugfix: the original message lacked the space before 'was'
                warning = '# the identifier ' + identifier + \
                          ' was in stylesheetsOrder, but not in stylesheets'
                print(warning)
                stylesheetTagsTxt += warning
                continue
            attribsDict = self._stylesheets[identifier]
            attribCode = ''
            for k, v in attribsDict.items():
                attribCode += str(k) + ': ' + str(v) + '; '
            attribCode = attribCode[:-2] # get rid of the last semicolon
            cssCode = '\n' + identifier + ' {' + attribCode + '}'
            stylesheetTagsTxt += cssCode
        stylesheetTagsTxt += '\n//--></style>\n'
        return stylesheetTagsTxt

    def javascriptTags(self):
        """Return a formatted version of the javascriptTags and
        javascriptLibs dictionaries. Each value in javascriptTags
        should be either a code string to include, or a list containing the
        JavaScript version number and the code string. The keys can be anything.
        The same applies for javascriptLibs, but the string should be the
        SRC filename rather than a code string."""
        javascriptTagsTxt = []
        # bugfix: iteritems() is Python-2-only; this file already uses the
        # Python-3-compatible print() form elsewhere.
        for key, details in self._javascriptTags.items():
            if not isinstance(details, (list, tuple)):
                details = ['', details]
            # bugfix: the script body is details[1]; the original emitted
            # details[0] (the version) twice and dropped the code entirely.
            javascriptTagsTxt += ['<script language="JavaScript', str(details[0]),
                                  '" type="text/javascript"><!--\n',
                                  str(details[1]), '\n//--></script>\n']
        for key, details in self._javascriptLibs.items():
            if not isinstance(details, (list, tuple)):
                details = ['', details]
            javascriptTagsTxt += ['<script language="JavaScript', str(details[0]),
                                  '" type="text/javascript" src="',
                                  str(details[1]), '" />\n']
        return ''.join(javascriptTagsTxt)

    def bodyTag(self):
        """Create a body tag from the entries in the dict bodyTagAttribs."""
        return self.formHTMLTag('body', self._bodyTagAttribs)

    def imgTag(self, src, alt='', width=None, height=None, border=0):
        """Dynamically generate an image tag. Cheetah will try to convert the
        src argument to a WebKit serverSidePath relative to the servlet's
        location. If width and height aren't specified they are calculated using
        PIL or ImageMagick if available."""
        src = self.normalizePath(src)
        if not width or not height:
            try: # see if the dimensions can be calc'd with PIL
                import Image
                im = Image.open(src)
                calcWidth, calcHeight = im.size
                del im
                if not width: width = calcWidth
                if not height: height = calcHeight
            except Exception:
                try: # try imageMagick instead
                    calcWidth, calcHeight = os.popen(
                        'identify -format "%w,%h" ' + src).read().split(',')
                    if not width: width = calcWidth
                    if not height: height = calcHeight
                except Exception:
                    # best-effort only: emit the tag without dimensions
                    pass
        if width and height:
            return ''.join(['<img src="', src, '" width="', str(width), '" height="', str(height),
                            '" alt="', alt, '" border="', str(border), '" />'])
        elif width:
            return ''.join(['<img src="', src, '" width="', str(width),
                            '" alt="', alt, '" border="', str(border), '" />'])
        elif height:
            return ''.join(['<img src="', src, '" height="', str(height),
                            '" alt="', alt, '" border="', str(border), '" />'])
        else:
            return ''.join(['<img src="', src, '" alt="', alt, '" border="', str(border), '" />'])

    def currentYr(self):
        """Return a string representing the current year."""
        return time.strftime("%Y", time.localtime(time.time()))

    def currentDate(self, formatString="%b %d, %Y"):
        """Return a string representing the current localtime."""
        return time.strftime(formatString, time.localtime(time.time()))

    def spacer(self, width=1, height=1):
        """Return a transparent spacer-image tag (1x1 by default)."""
        return '<img src="spacer.gif" width="%s" height="%s" alt="" />' % (str(width), str(height))

    def formHTMLTag(self, tagName, attributes=None):
        """Return a string containing an HTML <tag>.

        bugfix: 'attributes' used to default to a shared mutable dict.
        """
        if attributes is None:
            attributes = {}
        tagTxt = ['<', tagName.lower()]
        for name, val in attributes.items():
            tagTxt += [' ', name.lower(), '="', str(val), '"']
        tagTxt.append('>')
        return ''.join(tagTxt)

    def formatMetaTags(self, metaTags):
        """Format a dict of metaTag definitions into an HTML version."""
        metaTagsTxt = []
        if 'HTTP-EQUIV' in metaTags:
            for http_equiv, contents in metaTags['HTTP-EQUIV'].items():
                metaTagsTxt += ['<meta http-equiv="', str(http_equiv), '" content="',
                                str(contents), '" />\n']
        if 'NAME' in metaTags:
            for name, contents in metaTags['NAME'].items():
                metaTagsTxt += ['<meta name="', str(name), '" content="', str(contents),
                                '" />\n']
        return ''.join(metaTagsTxt)
# ---- end of Cheetah/Templates/_SkeletonPage.py ----
"""A Skeleton HTML page template, that provides basic structure and utility methods.
"""
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Cheetah.Templates._SkeletonPage import _SkeletonPage
##################################################
## MODULE CONSTANTS
try:
    True, False
except NameError:
    # Pre-Python-2.2.1 interpreters lack the True/False builtins; this
    # generated module defines them itself when missing.
    True, False = (1==1), (1==0)
# Short aliases for the NameMapper lookup helpers used throughout the
# generated method bodies below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler at generation time.
__CHEETAH_version__ = '2.0rc6'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 6)
__CHEETAH_genTime__ = 1139107954.3640411
__CHEETAH_genTimestamp__ = 'Sat Feb 4 18:52:34 2006'
__CHEETAH_src__ = 'src/Templates/SkeletonPage.tmpl'
__CHEETAH_srcLastModified__ = 'Mon Oct 7 11:37:30 2002'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
# Refuse to run if this generated module predates the installed Cheetah's
# minimum compatible version.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.'%(
            __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class SkeletonPage(_SkeletonPage):
    """Autogenerated Cheetah template class for SkeletonPage.tmpl.

    Generated by the Cheetah compiler (see __CHEETAH_src__) — edit the
    .tmpl source, not this class.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        _SkeletonPage.__init__(self, *args, **KWs)
        # Only forward the keyword args that the Cheetah runtime understands.
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k, v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def writeHeadTag(self, **KWS):
        ## CHEETAH: generated from #block writeHeadTag at line 22, col 1.
        # Standard generated preamble: pick (or fabricate) a transaction to
        # write into.  With a DummyTransaction the rendered text is returned.
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write('<head>\n<title>')
        _v = VFFSL(SL, "title", True) # '$title' on line 24, col 8
        if _v is not None: write(_filter(_v, rawExpr='$title')) # from line 24, col 8.
        write('</title>\n')
        _v = VFFSL(SL, "metaTags", True) # '$metaTags' on line 25, col 1
        if _v is not None: write(_filter(_v, rawExpr='$metaTags')) # from line 25, col 1.
        write(' \n')
        _v = VFFSL(SL, "stylesheetTags", True) # '$stylesheetTags' on line 26, col 1
        if _v is not None: write(_filter(_v, rawExpr='$stylesheetTags')) # from line 26, col 1.
        write(' \n')
        _v = VFFSL(SL, "javascriptTags", True) # '$javascriptTags' on line 27, col 1
        if _v is not None: write(_filter(_v, rawExpr='$javascriptTags')) # from line 27, col 1.
        write('\n</head>\n')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    def writeBody(self, **KWS):
        ## CHEETAH: generated from #block writeBody at line 36, col 1.
        # Placeholder body block: subclasses are expected to override it.
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write('This skeleton page has no flesh. Its body needs to be implemented.\n')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        ## START CACHE REGION: ID=header. line 6, col 1 in the source.
        # Decide whether the cached 'header' output can be replayed, or must
        # be regenerated (new region, expired item, or missing data).
        _RECACHE_header = False
        _cacheRegion_header = self.getCacheRegion(regionID='header', cacheInfo={'type': 2, 'id': 'header'})
        if _cacheRegion_header.isNew():
            _RECACHE_header = True
        _cacheItem_header = _cacheRegion_header.getCacheItem('header')
        if _cacheItem_header.hasExpired():
            _RECACHE_header = True
        if (not _RECACHE_header) and _cacheItem_header.getRefreshTime():
            try:
                _output = _cacheItem_header.renderOutput()
            except KeyError:
                _RECACHE_header = True
            else:
                write(_output)
                del _output
        if _RECACHE_header or not _cacheItem_header.getRefreshTime():
            # Regenerate: divert writes into a DummyTransaction buffer so the
            # rendered header can be stored in the cache before being emitted.
            _orig_transheader = trans
            trans = _cacheCollector_header = DummyTransaction()
            write = _cacheCollector_header.response().write
            _v = VFFSL(SL, "docType", True) # '$docType' on line 7, col 1
            if _v is not None: write(_filter(_v, rawExpr='$docType')) # from line 7, col 1.
            write('\n')
            _v = VFFSL(SL, "htmlTag", True) # '$htmlTag' on line 8, col 1
            if _v is not None: write(_filter(_v, rawExpr='$htmlTag')) # from line 8, col 1.
            write('''
<!-- This document was autogenerated by Cheetah(http://CheetahTemplate.org).
Do not edit it directly!
Copyright ''')
            _v = VFFSL(SL, "currentYr", True) # '$currentYr' on line 12, col 11
            if _v is not None: write(_filter(_v, rawExpr='$currentYr')) # from line 12, col 11.
            write(' - ')
            _v = VFFSL(SL, "siteCopyrightName", True) # '$siteCopyrightName' on line 12, col 24
            if _v is not None: write(_filter(_v, rawExpr='$siteCopyrightName')) # from line 12, col 24.
            write(' - All Rights Reserved.\nFeel free to copy any javascript or html you like on this site,\nprovided you remove all links and/or references to ')
            _v = VFFSL(SL, "siteDomainName", True) # '$siteDomainName' on line 14, col 52
            if _v is not None: write(_filter(_v, rawExpr='$siteDomainName')) # from line 14, col 52.
            write('''
However, please do not copy any content or images without permission.
''')
            _v = VFFSL(SL, "siteCredits", True) # '$siteCredits' on line 17, col 1
            if _v is not None: write(_filter(_v, rawExpr='$siteCredits')) # from line 17, col 1.
            write('''
-->
''')
            self.writeHeadTag(trans=trans)
            write('\n')
            # Restore the real transaction and persist the buffered header.
            trans = _orig_transheader
            write = trans.response().write
            _cacheData = _cacheCollector_header.response().getvalue()
            _cacheItem_header.setData(_cacheData)
            write(_cacheData)
            del _cacheData
            del _cacheCollector_header
            del _orig_transheader
        ## END CACHE REGION: header
        write('\n')
        _v = VFFSL(SL, "bodyTag", True) # '$bodyTag' on line 34, col 1
        if _v is not None: write(_filter(_v, rawExpr='$bodyTag')) # from line 34, col 1.
        write('\n\n')
        self.writeBody(trans=trans)
        write('''
</body>
</html>
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_SkeletonPage= 'respond'
## END CLASS DEFINITION
# Attach the Cheetah runtime plumbing methods to the class if the Template
# baseclass hasn't already provided them.
if not hasattr(SkeletonPage, '_initCheetahAttributes'):
    templateAPIClass = getattr(SkeletonPage, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(SkeletonPage)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=SkeletonPage()).run()
# ---- end of Cheetah/Templates/SkeletonPage.py ----
# $Id: CacheRegion.py,v 1.3 2006/01/28 04:19:30 tavis_rudd Exp $
'''
Cache holder classes for Cheetah:
Cache regions are defined using the #cache Cheetah directive. Each
cache region can be viewed as a dictionary (keyed by cacheRegionID)
handling at least one cache item (the default one). It's possible to add
cacheItems in a region by using the `varyBy` #cache directive parameter as
in the following example::
#def getArticle
this is the article content.
#end def
#cache varyBy=$getArticleID()
$getArticle($getArticleID())
#end cache
The code above will generate a CacheRegion and add new cacheItem for each value
of $getArticleID().
'''
try:
from hashlib import md5
except ImportError:
from md5 import md5
import time
import Cheetah.CacheStore
class CacheItem(object):
    """Container for one cached chunk of template output.

    Tracks the cache key (``cacheItemID``), the last refresh timestamp and
    the expiry time; the payload itself lives in the backing cache store.
    """

    def __init__(self, cacheItemID, cacheStore):
        self._cacheItemID = cacheItemID
        self._cacheStore = cacheStore
        self._refreshTime = None   # None until setData() is first called
        self._expiryTime = 0       # falsy expiry means "never expires"

    def hasExpired(self):
        """True when an expiry time is set and has already passed."""
        if not self._expiryTime:
            return self._expiryTime
        return time.time() > self._expiryTime

    def setExpiryTime(self, time):
        # NOTE: the parameter keeps its historical name even though it
        # shadows the ``time`` module inside this method.
        self._expiryTime = time

    def getExpiryTime(self):
        return self._expiryTime

    def setData(self, data):
        """Store ``data`` in the cache store and stamp the refresh time."""
        self._refreshTime = time.time()
        self._cacheStore.set(self._cacheItemID, data, self._expiryTime)

    def getRefreshTime(self):
        return self._refreshTime

    def getData(self):
        assert self._refreshTime
        return self._cacheStore.get(self._cacheItemID)

    def renderOutput(self):
        """Can be overridden to implement edge-caching"""
        data = self.getData()
        return data if data else ""

    def clear(self):
        """Drop the stored payload and forget the refresh timestamp."""
        self._cacheStore.delete(self._cacheItemID)
        self._refreshTime = None
class _CacheDataStoreWrapper(object):
def __init__(self, dataStore, keyPrefix):
self._dataStore = dataStore
self._keyPrefix = keyPrefix
def get(self, key):
return self._dataStore.get(self._keyPrefix+key)
def delete(self, key):
self._dataStore.delete(self._keyPrefix+key)
def set(self, key, val, time=0):
self._dataStore.set(self._keyPrefix+key, val, time=time)
class CacheRegion(object):
    '''
    A `CacheRegion` stores some `CacheItem` instances.
    This implementation stores the data in the memory of the current process.
    If you need a more advanced data store, create a cacheStore class that works
    with Cheetah's CacheStore protocol and provide it as the cacheStore argument
    to __init__. For example you could use
    Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
    memcached API (http://www.danga.com/memcached).
    '''
    _cacheItemClass = CacheItem

    def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
        """regionID: the id from the #cache directive.
        templateCacheIdPrefix: per-template namespace prefix for store keys.
        cacheStore: backing store; defaults to an in-process MemoryCacheStore.
        """
        self._isNew = True
        self._regionID = regionID
        self._templateCacheIdPrefix = templateCacheIdPrefix
        if not cacheStore:
            cacheStore = Cheetah.CacheStore.MemoryCacheStore()
        self._cacheStore = cacheStore
        self._wrappedCacheDataStore = _CacheDataStoreWrapper(
            cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':')
        self._cacheItems = {}

    def isNew(self):
        """True until the first cache item is created in this region."""
        return self._isNew

    def clear(self):
        " drop all the caches stored in this cache region "
        # bugfix: iterate over a snapshot of the keys — the loop deletes
        # entries, and mutating a dict while iterating its live key view
        # raises RuntimeError on Python 3 (keys() was a copy on Python 2).
        for cacheItemId in list(self._cacheItems.keys()):
            cacheItem = self._cacheItems[cacheItemId]
            cacheItem.clear()
            del self._cacheItems[cacheItemId]

    def getCacheItem(self, cacheItemID):
        """ Lazy access to a cacheItem
        Try to find a cache in the stored caches. If it doesn't
        exist, it's created.
        Returns a `CacheItem` instance.
        """
        # bugfix: md5() requires bytes on Python 3; encoding is a no-op for
        # the ASCII ids Python 2 produced here, so digests are unchanged.
        cacheItemID = md5(str(cacheItemID).encode('utf-8')).hexdigest()
        if cacheItemID not in self._cacheItems:
            cacheItem = self._cacheItemClass(
                cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore)
            self._cacheItems[cacheItemID] = cacheItem
            self._isNew = False
        return self._cacheItems[cacheItemID]
# ---- end of Cheetah/CacheRegion.py ----
# $Id: CheetahWrapper.py,v 1.26 2007/10/02 01:22:04 tavis_rudd Exp $
"""Cheetah command-line interface.
2002-09-03 MSO: Total rewrite.
2002-09-04 MSO: Bugfix, compile command was using wrong output ext.
2002-11-08 MSO: Another rewrite.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>>
Version: $Revision: 1.26 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/10/02 01:22:04 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>"
__revision__ = "$Revision: 1.26 $"[11:-2]
import getopt, glob, os, pprint, re, shutil, sys
import cPickle as pickle
from optparse import OptionParser
from Cheetah.Version import Version
from Cheetah.Template import Template, DEFAULT_COMPILER_SETTINGS
from Cheetah.Utils.Misc import mkdirsWithPyInitFiles
optionDashesRE = re.compile( R"^-{1,2}" )
moduleNameRE = re.compile( R"^[a-zA-Z_][a-zA-Z_0-9]*$" )
def fprintfMessage(stream, format, *args):
    """printf-style helper: write ``format % args`` to ``stream``.

    A trailing ``^`` on the format suppresses the newline that is
    otherwise appended automatically.
    """
    if format.endswith('^'):
        format = format[:-1]
    else:
        format += '\n'
    message = format % args if args else format
    stream.write(message)
class Error(Exception):
    """Base exception for errors raised by the cheetah command-line tool."""
    pass
class Bundle:
    """Wrap the source, destination and backup paths in one neat little class.
    Used by CheetahWrapper.getBundles().

    All keyword arguments become instance attributes verbatim.
    """

    def __init__(self, **kw):
        for attrName, attrValue in kw.items():
            setattr(self, attrName, attrValue)

    def __repr__(self):
        return "<Bundle %r>" % self.__dict__
##################################################
## USAGE FUNCTION & MESSAGES
def usage(usageMessage, errorMessage="", out=sys.stderr):
    """Write help text, an optional error message, and abort the program.

    Exits with status 1 when an error message is given, 0 otherwise.
    """
    out.write(WRAPPER_TOP)
    out.write(usageMessage)
    if errorMessage:
        out.write('\n')
        out.write("*** USAGE ERROR ***: %s\n" % errorMessage)
        sys.exit(1)
    sys.exit(0)
WRAPPER_TOP = """\
__ ____________ __
\ \/ \/ /
\/ * * \/ CHEETAH %(Version)s Command-Line Tool
\ | /
\ ==----== / by Tavis Rudd <tavis@damnsimple.com>
\__________/ and Mike Orr <sluggoster@gmail.com>
""" % globals()
HELP_PAGE1 = """\
USAGE:
------
cheetah compile [options] [FILES ...] : Compile template definitions
cheetah fill [options] [FILES ...] : Fill template definitions
cheetah help : Print this help message
cheetah options : Print options help message
cheetah test [options] : Run Cheetah's regression tests
: (same as for unittest)
cheetah version : Print Cheetah version number
You may abbreviate the command to the first letter; e.g., 'h' == 'help'.
If FILES is a single "-", read standard input and write standard output.
Run "cheetah options" for the list of valid options.
"""
##################################################
## CheetahWrapper CLASS
class CheetahWrapper(object):
    """Command-line driver for compiling and filling Cheetah templates."""

    MAKE_BACKUPS = True        # write <dst>.bak files before overwriting
    BACKUP_SUFFIX = ".bak"
    _templateClass = None      # overridden via --templateAPIClass
    _compilerSettings = None   # overridden via --settings

    def __init__(self):
        # Real initialization happens in main()/parseOpts(); these are
        # placeholders so attribute access is safe before parsing.
        self.progName = None
        self.command = None
        self.opts = None
        self.pathArgs = None
        self.sourceFiles = []
        self.searchList = []
        self.parser = None
##################################################
## MAIN ROUTINE
    def main(self, argv=None):
        """The main program controller."""
        if argv is None:
            argv = sys.argv
        # Step 1: Determine the command and arguments.
        try:
            self.progName = progName = os.path.basename(argv[0])
            self.command = command = optionDashesRE.sub("", argv[1])
            if command == 'test':
                self.testOpts = argv[2:]
            else:
                self.parseOpts(argv[2:])
        except IndexError:
            usage(HELP_PAGE1, "not enough command-line arguments")
        # Step 2: Call the command
        meths = (self.compile, self.fill, self.help, self.options,
            self.test, self.version)
        for meth in meths:
            methName = meth.__name__
            # Or meth.im_func.func_name
            # Or meth.func_name (Python >= 2.1 only, sometimes works on 2.0)
            methInitial = methName[0]
            # Commands may be given in full or abbreviated to one letter.
            if command in (methName, methInitial):
                sys.argv[0] += (" " + methName)
                # @@MO: I don't necessarily agree sys.argv[0] should be
                # modified.
                meth()
                return
        # If none of the commands matched.
        usage(HELP_PAGE1, "unknown command '%s'" % command)
    def parseOpts(self, args):
        """Parse 'compile'/'fill' command-line options into self.opts and
        collect the remaining path arguments in self.pathArgs."""
        C, D, W = self.chatter, self.debug, self.warn
        self.isCompile = isCompile = self.command[0] == 'c'
        defaultOext = isCompile and ".py" or ".html"
        self.parser = OptionParser()
        pao = self.parser.add_option
        pao("--idir", action="store", dest="idir", default='', help='Input directory (defaults to current directory)')
        pao("--odir", action="store", dest="odir", default="", help='Output directory (defaults to current directory)')
        pao("--iext", action="store", dest="iext", default=".tmpl", help='File input extension (defaults: compile: .tmpl, fill: .tmpl)')
        pao("--oext", action="store", dest="oext", default=defaultOext, help='File output extension (defaults: compile: .py, fill: .html)')
        pao("-R", action="store_true", dest="recurse", default=False, help='Recurse through subdirectories looking for input files')
        pao("--stdout", "-p", action="store_true", dest="stdout", default=False, help='Send output to stdout instead of writing to a file')
        pao("--quiet", action="store_false", dest="verbose", default=True, help='Do not print informational messages to stdout')
        pao("--debug", action="store_true", dest="debug", default=False, help='Print diagnostic/debug information to stderr')
        pao("--env", action="store_true", dest="env", default=False, help='Pass the environment into the search list')
        pao("--pickle", action="store", dest="pickle", default="", help='Unpickle FILE and pass it through in the search list')
        pao("--flat", action="store_true", dest="flat", default=False, help='Do not build destination subdirectories')
        pao("--nobackup", action="store_true", dest="nobackup", default=False, help='Do not make backup files when generating new ones')
        pao("--settings", action="store", dest="compilerSettingsString", default=None, help='String of compiler settings to pass through, e.g. --settings="useNameMapper=False,useFilters=False"')
        pao('--print-settings', action='store_true', dest='print_settings', help='Print out the list of available compiler settings')
        pao("--templateAPIClass", action="store", dest="templateClassName", default=None, help='Name of a subclass of Cheetah.Template.Template to use for compilation, e.g. MyTemplateClass')
        pao("--parallel", action="store", type="int", dest="parallel", default=1, help='Compile/fill templates in parallel, e.g. --parallel=4')
        pao('--shbang', dest='shbang', default='#!/usr/bin/env python', help='Specify the shbang to place at the top of compiled templates, e.g. --shbang="#!/usr/bin/python2.6"')
        opts, files = self.parser.parse_args(args)
        self.opts = opts
        if sys.platform == "win32":
            # Windows shells don't expand globs; do it ourselves.
            new_files = []
            for spec in files:
                file_list = glob.glob(spec)
                if file_list:
                    new_files.extend(file_list)
                else:
                    new_files.append(spec)
            files = new_files
        self.pathArgs = files
        D("""\
cheetah compile %s
Options are
%s
Files are %s""", args, pprint.pformat(vars(opts)), files)
        if opts.print_settings:
            print()
            print('>> Available Cheetah compiler settings:')
            from Cheetah.Compiler import _DEFAULT_COMPILER_SETTINGS
            listing = _DEFAULT_COMPILER_SETTINGS
            listing.sort(key=lambda l: l[0][0].lower())
            for l in listing:
                print('\t%s (default: "%s")\t%s' % l)
            sys.exit(0)
        #cleanup trailing path separators
        seps = [sep for sep in [os.sep, os.altsep] if sep]
        for attr in ['idir', 'odir']:
            for sep in seps:
                path = getattr(opts, attr, None)
                if path and path.endswith(sep):
                    path = path[:-len(sep)]
                    setattr(opts, attr, path)
                    break
        self._fixExts()
        # --env / --pickle prepend extra namespaces to the search list.
        if opts.env:
            self.searchList.insert(0, os.environ)
        if opts.pickle:
            f = open(opts.pickle, 'rb')
            unpickled = pickle.load(f)
            f.close()
            self.searchList.insert(0, unpickled)
##################################################
## COMMAND METHODS
    def compile(self):
        """Entry point for the 'compile' command."""
        self._compileOrFill()
    def fill(self):
        """Entry point for the 'fill' command: install the Cheetah import
        hooks (so .tmpl files are importable), then compile-and-fill."""
        from Cheetah.ImportHooks import install
        install()
        self._compileOrFill()
    def help(self):
        """Print the usage page to stdout (exits via usage())."""
        usage(HELP_PAGE1, "", sys.stdout)
    def options(self):
        """Print the optparse-generated options help."""
        return self.parser.print_help()
    def test(self):
        """Run Cheetah's regression test suite (the 'test' command)."""
        # @@MO: Ugly kludge.
        # The suite creates temp files, so verify the current directory is
        # writable before starting.
        TEST_WRITE_FILENAME = 'cheetah_test_file_creation_ability.tmp'
        try:
            f = open(TEST_WRITE_FILENAME, 'w')
        except:
            sys.exit("""\
Cannot run the tests because you don't have write permission in the current
directory. The tests need to create temporary files. Change to a directory
you do have write permission to and re-run the tests.""")
        else:
            f.close()
            os.remove(TEST_WRITE_FILENAME)
        # @@MO: End ugly kludge.
        from Cheetah.Tests import Test
        import unittest
        # -q / -v mirror unittest's verbosity flags.
        verbosity = 1
        if '-q' in self.testOpts:
            verbosity = 0
        if '-v' in self.testOpts:
            verbosity = 2
        runner = unittest.TextTestRunner(verbosity=verbosity)
        runner.run(unittest.TestSuite(Test.suites))
    def version(self):
        """Print the Cheetah version number."""
        print(Version)
    # If you add a command, also add it to the 'meths' variable in main().
##################################################
## LOGGING METHODS
    def chatter(self, format, *args):
        """Print a verbose message to stdout. But don't if .opts.stdout is
        true or .opts.verbose is false.
        """
        # --stdout mode reserves stdout for the filled template output.
        if self.opts.stdout or not self.opts.verbose:
            return
        fprintfMessage(sys.stdout, format, *args)
    def debug(self, format, *args):
        """Print a debugging message to stderr, but don't if .debug is
        false.
        """
        # Debug output goes to stderr so it never pollutes --stdout output.
        if self.opts.debug:
            fprintfMessage(sys.stderr, format, *args)
    def warn(self, format, *args):
        """Always print a warning message to stderr.
        """
        # Unlike chatter()/debug(), warnings are never suppressed.
        fprintfMessage(sys.stderr, format, *args)
    def error(self, format, *args):
        """Always print an error message to stderr and exit with an error
        code (the original docstring mislabeled this a warning).
        """
        fprintfMessage(sys.stderr, format, *args)
        sys.exit(1)
##################################################
## HELPER METHODS
def _fixExts(self):
assert self.opts.oext, "oext is empty!"
iext, oext = self.opts.iext, self.opts.oext
if iext and not iext.startswith("."):
self.opts.iext = "." + iext
if oext and not oext.startswith("."):
self.opts.oext = "." + oext
    def _compileOrFill(self):
        """Expand the path arguments into source files and compile/fill each
        one, optionally forking worker processes (--parallel)."""
        C, D, W = self.chatter, self.debug, self.warn
        opts, files = self.opts, self.pathArgs
        if files == ["-"]:
            # Single "-" means: read stdin, write stdout.
            self._compileOrFillStdin()
            return
        elif not files and opts.recurse:
            which = opts.idir and "idir" or "current"
            C("Drilling down recursively from %s directory.", which)
            sourceFiles = []
            dir = os.path.join(self.opts.idir, os.curdir)
            os.path.walk(dir, self._expandSourceFilesWalk, sourceFiles)
        elif not files:
            usage(HELP_PAGE1, "Neither files nor -R specified!")
        else:
            sourceFiles = self._expandSourceFiles(files, opts.recurse, True)
        sourceFiles = [os.path.normpath(x) for x in sourceFiles]
        D("All source files found: %s", sourceFiles)
        bundles = self._getBundles(sourceFiles)
        D("All bundles: %s", pprint.pformat(bundles))
        if self.opts.flat:
            self._checkForCollisions(bundles)
        # In parallel mode a new process is forked for each template
        # compilation, out of a pool of size self.opts.parallel. This is not
        # really optimal in all cases (e.g. probably wasteful for small
        # templates), but seems to work well in real life for me.
        #
        # It also won't work for Windows users, but I'm not going to lose any
        # sleep over that.
        if self.opts.parallel > 1:
            bad_child_exit = 0
            pid_pool = set()
            def child_wait():
                # Reap one child and report its exit status.
                pid, status = os.wait()
                pid_pool.remove(pid)
                return os.WEXITSTATUS(status)
            while bundles:
                b = bundles.pop()
                pid = os.fork()
                if pid:
                    pid_pool.add(pid)
                else:
                    # Child process: do one bundle and exit.
                    self._compileOrFillBundle(b)
                    sys.exit(0)
                if len(pid_pool) == self.opts.parallel:
                    bad_child_exit = child_wait()
                    if bad_child_exit:
                        break
            # Drain remaining children, remembering the first failure.
            while pid_pool:
                child_exit = child_wait()
                if not bad_child_exit:
                    bad_child_exit = child_exit
            if bad_child_exit:
                sys.exit("Child process failed, exited with code %d" % bad_child_exit)
        else:
            for b in bundles:
                self._compileOrFillBundle(b)
def _checkForCollisions(self, bundles):
"""Check for multiple source paths writing to the same destination
path.
"""
C, D, W = self.chatter, self.debug, self.warn
isError = False
dstSources = {}
for b in bundles:
if b.dst in dstSources:
dstSources[b.dst].append(b.src)
else:
dstSources[b.dst] = [b.src]
keys = sorted(dstSources.keys())
for dst in keys:
sources = dstSources[dst]
if len(sources) > 1:
isError = True
sources.sort()
fmt = "Collision: multiple source files %s map to one destination file %s"
W(fmt, sources, dst)
if isError:
what = self.isCompile and "Compilation" or "Filling"
sys.exit("%s aborted due to collisions" % what)
def _expandSourceFilesWalk(self, arg, dir, files):
"""Recursion extension for .expandSourceFiles().
This method is a callback for os.path.walk().
'arg' is a list to which successful paths will be appended.
"""
iext = self.opts.iext
for f in files:
path = os.path.join(dir, f)
if path.endswith(iext) and os.path.isfile(path):
arg.append(path)
elif os.path.islink(path) and os.path.isdir(path):
os.path.walk(path, self._expandSourceFilesWalk, arg)
# If is directory, do nothing; 'walk' will eventually get it.
def _expandSourceFiles(self, files, recurse, addIextIfMissing):
    """Calculate source paths from 'files' by applying the
    command-line options.

    NOTE(review): the 'files' parameter is immediately rebound to a fresh
    list below and the loop reads self.pathArgs instead, so the argument
    value is effectively ignored -- confirm callers expect this.

    'recurse' enables directory recursion via os.path.walk (Python 2 only,
    raises Error on a directory otherwise); 'addIextIfMissing' retries a
    missing path with the input extension appended.  Returns the list of
    resolved source-file paths.
    """
    C, D, W = self.chatter, self.debug, self.warn
    idir = self.opts.idir
    iext = self.opts.iext
    files = []
    for f in self.pathArgs:
        # Remember where this arg's expansion starts, for the debug report.
        oldFilesLen = len(files)
        D("Expanding %s", f)
        path = os.path.join(idir, f)
        pathWithExt = path + iext # May or may not be valid.
        if os.path.isdir(path):
            if recurse:
                os.path.walk(path, self._expandSourceFilesWalk, files)
            else:
                raise Error("source file '%s' is a directory" % path)
        elif os.path.isfile(path):
            files.append(path)
        elif (addIextIfMissing and not path.endswith(iext) and
                os.path.isfile(pathWithExt)):
            files.append(pathWithExt)
            # Do not recurse directories discovered by iext appending.
        elif os.path.exists(path):
            W("Skipping source file '%s', not a plain file.", path)
        else:
            W("Skipping source file '%s', not found.", path)
        if len(files) > oldFilesLen:
            D(" ... found %s", files[oldFilesLen:])
    return files
def _getBundles(self, sourceFiles):
    """Build a Bundle (src/dst/bak/base/basename) for each source path.

    'base' is the source path relative to the input directory with the
    input extension stripped; the destination swaps in the output
    directory and extension.  With --flat all outputs land directly in
    the output directory.
    """
    flat = self.opts.flat
    idir = self.opts.idir
    iext = self.opts.iext
    nobackup = self.opts.nobackup  # NOTE(review): read but unused here
    odir = self.opts.odir
    oext = self.opts.oext
    idirSlash = idir + os.sep
    bundles = []
    for src in sourceFiles:
        # 'base' is the subdirectory plus basename.
        base = src
        if idir and src.startswith(idirSlash):
            base = src[len(idirSlash):]
        if iext and base.endswith(iext):
            base = base[:-len(iext)]
        basename = os.path.basename(base)
        if flat:
            dst = os.path.join(odir, basename + oext)
        else:
            dbn = basename
            if odir and base.startswith(os.sep):
                # Absolute source path: strip the longest matching odir
                # prefix so output is not nested under a duplicated tree.
                # NOTE(review): the literal '/' checks below are POSIX-only.
                odd = odir
                while odd != '':
                    idx = base.find(odd)
                    if idx == 0:
                        dbn = base[len(odd):]
                        if dbn[0] == '/':
                            dbn = dbn[1:]
                        break
                    odd = os.path.dirname(odd)
                    if odd == '/':
                        break
                dst = os.path.join(odir, dbn + oext)
            else:
                dst = os.path.join(odir, base + oext)
        # Backup path used when overwriting an existing destination.
        bak = dst + self.BACKUP_SUFFIX
        b = Bundle(src=src, dst=dst, bak=bak, base=base, basename=basename)
        bundles.append(b)
    return bundles
def _getTemplateClass(self):
    """Return (and cache) the Template class used to compile/fill.

    Defaults to Cheetah.Template.Template; the --templateAPIClass option
    ("module:class") overrides it.  Calls self.error() on a malformed
    option value or when the named class cannot be found.
    """
    C, D, W = self.chatter, self.debug, self.warn
    modname = None
    if self._templateClass:
        return self._templateClass
    modname = self.opts.templateClassName
    if not modname:
        return Template
    # NOTE(review): 'p' is computed on the full "module:class" string, so
    # a dot inside the class part would be mistaken for a package dot.
    p = modname.rfind('.')
    if ':' not in modname:
        self.error('The value of option --templateAPIClass is invalid\n'
                   'It must be in the form "module:class", '
                   'e.g. "Cheetah.Template:Template"')
    modname, classname = modname.split(':')
    C('using --templateAPIClass=%s:%s'%(modname, classname))
    if p >= 0:
        # Dotted module: import the package, then fetch the submodule.
        mod = getattr(__import__(modname[:p], {}, {}, [modname[p+1:]]), modname[p+1:])
    else:
        mod = __import__(modname, {}, {}, [])
    klass = getattr(mod, classname, None)
    if klass:
        self._templateClass = klass
        return klass
    else:
        self.error('**Template class specified in option --templateAPIClass not found\n'
                   '**Falling back on Cheetah.Template:Template')
def _getCompilerSettings(self):
    """Parse and cache the --settings option as a dict of compiler settings.

    The option value is evaluated as Python keyword arguments (it comes
    from the command line, so this is as trusted as the original exec was).
    Invalid syntax or unknown setting names abort via self.error().
    Returns {} when the option was not given.
    """
    if self._compilerSettings:
        return self._compilerSettings
    def getkws(**kws):
        return kws
    if self.opts.compilerSettingsString:
        try:
            # eval (not exec) so the result binds reliably to a local;
            # exec('settings = ...') cannot rebind a function local on
            # Python 3.
            settings = eval('getkws(%s)' % self.opts.compilerSettingsString)
        except Exception:
            self.error("There's an error in your --settings option."
                       "It must be valid Python syntax.\n"
                       +" --settings='%s'\n"%self.opts.compilerSettingsString
                       +" %s: %s"%sys.exc_info()[:2]
                       )
        validKeys = DEFAULT_COMPILER_SETTINGS.keys()
        # Collect the offending keys explicitly instead of relying on the
        # list-comp variable 'k' leaking out of scope (a NameError on
        # Python 3 and fragile even on Python 2).
        badKeys = [k for k in settings.keys() if k not in validKeys]
        if badKeys:
            self.error(
                'The --setting "%s" is not a valid compiler setting name.'%badKeys[0])
        self._compilerSettings = settings
        return settings
    else:
        return {}
def _compileOrFillStdin(self):
    """Compile or fill a template read from stdin, writing to stdout."""
    klass = self._getTemplateClass()
    settings = self._getCompilerSettings()
    if not self.isCompile:
        # Fill mode: instantiate the template and render it.
        output = str(klass(file=sys.stdin, compilerSettings=settings))
    else:
        # Compile mode: emit the generated Python source instead.
        output = klass.compile(file=sys.stdin,
                               compilerSettings=settings,
                               returnAClass=False)
    sys.stdout.write(output)
def _compileOrFillBundle(self, b):
    """Compile or fill a single Bundle: read b.src, write b.dst.

    Backs up an existing destination to b.bak unless --nobackup, creates
    missing destination directories (with package __init__ files when
    compiling, via mkdirsWithPyInitFiles), and honours --stdout by writing
    there instead of b.dst.  Raises Error when compiling a source whose
    basename is not a valid Python module name.
    """
    C, D, W = self.chatter, self.debug, self.warn
    TemplateClass = self._getTemplateClass()
    compilerSettings = self._getCompilerSettings()
    src = b.src
    dst = b.dst
    base = b.base
    basename = b.basename
    dstDir = os.path.dirname(dst)
    what = self.isCompile and "Compiling" or "Filling"
    C("%s %s -> %s^", what, src, dst) # No trailing newline.
    if os.path.exists(dst) and not self.opts.nobackup:
        bak = b.bak
        C(" (backup %s)", bak) # On same line as previous message.
    else:
        bak = None
        C("")
    if self.isCompile:
        # The generated module/class is named after the file, so the name
        # must satisfy Python identifier rules.
        if not moduleNameRE.match(basename):
            tup = basename, src
            raise Error("""\
%s: base name %s contains invalid characters. It must
be named according to the same rules as Python modules.""" % tup)
        pysrc = TemplateClass.compile(file=src, returnAClass=False,
                                      moduleName=basename,
                                      className=basename,
                                      commandlineopts=self.opts,
                                      compilerSettings=compilerSettings)
        output = pysrc
    else:
        #output = str(TemplateClass(file=src, searchList=self.searchList))
        tclass = TemplateClass.compile(file=src, compilerSettings=compilerSettings)
        output = str(tclass(searchList=self.searchList))
    if bak:
        shutil.copyfile(dst, bak)
    if dstDir and not os.path.exists(dstDir):
        if self.isCompile:
            mkdirsWithPyInitFiles(dstDir)
        else:
            os.makedirs(dstDir)
    if self.opts.stdout:
        sys.stdout.write(output)
    else:
        f = open(dst, 'w')
        f.write(output)
        f.close()
# Called when invoked as `cheetah`
def _cheetah():
    """Console-script entry point for the `cheetah` command."""
    CheetahWrapper().main()
# Called when invoked as `cheetah-compile`
def _cheetah_compile():
    """Console-script entry point for `cheetah-compile`."""
    # Inject the subcommand so main() behaves like `cheetah compile ...`.
    sys.argv.insert(1, "compile")
    CheetahWrapper().main()
##################################################
## if run from the command line
# Running this module directly is equivalent to the `cheetah` command.
if __name__ == '__main__': CheetahWrapper().main()
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
'''
Cheetah is an open source template engine and code generation tool.
It can be used standalone or combined with other tools and frameworks. Web
development is its principal use, but Cheetah is very flexible and is also being
used to generate C++ game code, Java, sql, form emails and even Python code.
Homepage
http://www.cheetahtemplate.org/
Documentation
http://cheetahtemplate.org/learn.html
Mailing list
cheetahtemplate-discuss@lists.sourceforge.net
Subscribe at
http://lists.sourceforge.net/lists/listinfo/cheetahtemplate-discuss
'''
from Version import *
| Python |
#!/usr/bin/env python
'''
Core module of Cheetah's Unit-testing framework
TODO
================================================================================
# combo tests
# negative test cases for expected exceptions
# black-box vs clear-box testing
# do some tests that run the Template for long enough to check that the refresh code works
'''
import sys
import unittest
from Cheetah.Tests import SyntaxAndOutput
from Cheetah.Tests import NameMapper
from Cheetah.Tests import Misc
from Cheetah.Tests import Filters
from Cheetah.Tests import Template
from Cheetah.Tests import Cheps
from Cheetah.Tests import Parser
from Cheetah.Tests import Regressions
from Cheetah.Tests import Unicode
from Cheetah.Tests import CheetahWrapper
from Cheetah.Tests import Analyzer
# Install platform-specific end-of-line variants used by the syntax tests.
SyntaxAndOutput.install_eols()
# Collect the test cases from each Cheetah test module into one suite list.
suites = [
    unittest.findTestCases(SyntaxAndOutput),
    unittest.findTestCases(NameMapper),
    unittest.findTestCases(Filters),
    unittest.findTestCases(Template),
    #unittest.findTestCases(Cheps),
    unittest.findTestCases(Regressions),
    unittest.findTestCases(Unicode),
    unittest.findTestCases(Misc),
    unittest.findTestCases(Parser),
    unittest.findTestCases(Analyzer),
]
if not sys.platform.startswith('java'):
    # CheetahWrapper tests are skipped on Jython (presumably they depend on
    # CPython-only behavior -- confirm).
    suites.append(unittest.findTestCases(CheetahWrapper))
if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    if 'xml' in sys.argv:
        # Emit JUnit-style XML (for CI) instead of plain-text results.
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(filename='Cheetah-Tests.xml')
    results = runner.run(unittest.TestSuite(suites))
| Python |
"""
XML Test Runner for PyUnit
"""
# Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in
# the Public Domain. With contributions by Paolo Borelli.
__revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $"
import os.path
import re
import sys
import time
import traceback
import unittest
from StringIO import StringIO
from xml.sax.saxutils import escape
from StringIO import StringIO
class _TestInfo(object):
    """Information about a particular test.
    Used by _XMLTestResult.
    """

    def __init__(self, test, time):
        """Record the test's class name, method name and run time."""
        _pieces = test.id().split('.')
        (self._class, self._method) = ('.'.join(_pieces[:-1]), _pieces[-1])
        self._time = time
        self._error = None    # (exc_type, exc_value, traceback) or None
        self._failure = None  # (exc_type, exc_value, traceback) or None

    def print_report(self, stream):
        """Print information about this test case in XML format to the
        supplied stream.
        """
        stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \
            {
                "class": self._class,
                "method": self._method,
                "time": self._time,
            })
        # Identity comparison (PEP 8): these are either None or a tuple we
        # set ourselves; the old `!= None` relied on __ne__.
        if self._failure is not None:
            self._print_error(stream, 'failure', self._failure)
        if self._error is not None:
            self._print_error(stream, 'error', self._error)
        stream.write('</testcase>\n')

    def _print_error(self, stream, tagname, error):
        """Print information from a failure or error to the supplied stream."""
        text = escape(str(error[1]))
        stream.write('\n')
        stream.write(' <%s type="%s">%s\n' \
            % (tagname, issubclass(error[0], Exception) and error[0].__name__ or str(error[0]), text))
        tb_stream = StringIO()
        traceback.print_tb(error[2], None, tb_stream)
        stream.write(escape(tb_stream.getvalue()))
        stream.write(' </%s>\n' % tagname)
        stream.write(' ')
# Module level functions since Python 2.3 doesn't grok decorators
def create_success(test, time):
    """Build the _TestInfo record for a test that passed."""
    info = _TestInfo(test, time)
    return info
def create_failure(test, time, failure):
    """Build the _TestInfo record for a failed test, attaching the
    failure's (type, value, traceback) triple."""
    record = _TestInfo(test, time)
    record._failure = failure
    return record
def create_error(test, time, error):
    """Build the _TestInfo record for an erroneous test, attaching the
    error's (type, value, traceback) triple."""
    record = _TestInfo(test, time)
    record._error = error
    return record
class _XMLTestResult(unittest.TestResult):
    """A test result class that stores result as XML.
    Used by XMLTestRunner.
    """

    def __init__(self, classname):
        unittest.TestResult.__init__(self)
        self._test_name = classname
        self._start_time = None
        self._tests = []      # _TestInfo records, in execution order
        self._error = None    # error triple for the test in progress
        self._failure = None  # failure triple for the test in progress

    def startTest(self, test):
        """Reset per-test state and start the clock."""
        unittest.TestResult.startTest(self, test)
        self._error = None
        self._failure = None
        self._start_time = time.time()

    def stopTest(self, test):
        """Record a _TestInfo entry for the test that just finished."""
        elapsed = time.time() - self._start_time
        unittest.TestResult.stopTest(self, test)
        if self._error:
            record = create_error(test, elapsed, self._error)
        elif self._failure:
            record = create_failure(test, elapsed, self._failure)
        else:
            record = create_success(test, elapsed)
        self._tests.append(record)

    def addError(self, test, err):
        unittest.TestResult.addError(self, test, err)
        self._error = err

    def addFailure(self, test, err):
        unittest.TestResult.addFailure(self, test, err)
        self._failure = err

    def print_report(self, stream, time_taken, out, err):
        """Prints the XML report to the supplied stream.
        The time the tests took to perform as well as the captured standard
        output and standard error streams must be passed in.
        """
        stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \
            { "e": len(self.errors), "f": len(self.failures) })
        stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \
            {
                "n": self._test_name,
                "t": self.testsRun,
                "time": time_taken,
            })
        for record in self._tests:
            record.print_report(stream)
        stream.write(' <system-out><![CDATA[%s]]></system-out>\n' % out)
        stream.write(' <system-err><![CDATA[%s]]></system-err>\n' % err)
        stream.write('</testsuite>\n')
class XMLTestRunner(object):
    """A test runner that stores results in XML format compatible with JUnit.
    XMLTestRunner(stream=None) -> XML test runner
    The XML file is written to the supplied stream. If stream is None, the
    results are stored in a file called TEST-<module>.<class>.xml in the
    current working directory (if not overridden with the path property),
    where <module> and <class> are the module and class name of the test class.
    """

    def __init__(self, *args, **kwargs):
        """Accept the output stream positionally (legacy xmlrunner API) or
        as the stream=/filename= keyword arguments."""
        self._stream = kwargs.get('stream')
        if self._stream is None and args:
            # Bug fix: callers such as XMLTestRunnerTest pass the stream
            # positionally; kwargs.get alone silently dropped it, sending
            # output to a TEST-*.xml file instead.
            self._stream = args[0]
        self._filename = kwargs.get('filename')  # overrides default file name
        self._path = "."                         # directory for generated files

    def run(self, test):
        """Run the given test case or test suite."""
        class_ = test.__class__
        classname = class_.__module__ + "." + class_.__name__
        if self._stream is None:
            filename = "TEST-%s.xml" % classname
            if self._filename:
                filename = self._filename
            # open() rather than the Python-2-only file() builtin.
            stream = open(os.path.join(self._path, filename), "w")
            stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
        else:
            stream = self._stream
        result = _XMLTestResult(classname)
        start_time = time.time()
        # TODO: Python 2.5: Use the with statement
        # Capture stdout/stderr while the tests run so they can be embedded
        # in the report; always restore the real streams afterwards.
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        try:
            test(result)
            try:
                out_s = sys.stdout.getvalue()
            except AttributeError:
                # A test replaced sys.stdout with something StringIO-unlike.
                out_s = ""
            try:
                err_s = sys.stderr.getvalue()
            except AttributeError:
                err_s = ""
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
        time_taken = time.time() - start_time
        result.print_report(stream, time_taken, out_s, err_s)
        if self._stream is None:
            stream.close()
        return result

    def _set_path(self, path):
        self._path = path

    path = property(lambda self: self._path, _set_path, None,
            """The path where the XML files are stored.
            This property is ignored when the XML file is written to a file
            stream.""")
class XMLTestRunnerTest(unittest.TestCase):
    """Self-tests for XMLTestRunner: each test runs a small throwaway
    TestCase through the runner and compares the emitted XML against a
    golden string (times and messages are normalized first)."""

    def setUp(self):
        # Capture the runner's XML output in memory.
        self._stream = StringIO()

    def _try_test_run(self, test_class, expected):
        """Run the test suite against the supplied test class and compare the
        XML result against the expected XML string. Fail if the expected
        string doesn't match the actual string. All time attribute in the
        expected string should have the value "0.000". All error and failure
        messages are reduced to "Foobar".
        """
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(test_class))
        got = self._stream.getvalue()
        # Replace all time="X.YYY" attributes by time="0.000" to enable a
        # simple string comparison.
        got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got)
        # Likewise, replace all failure and error messages by a simple "Foobar"
        # string.
        got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got)
        got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got)
        self.assertEqual(expected, got)

    def test_no_tests(self):
        """Regression test: Check whether a test run without any tests
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000">
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")

    def test_success(self):
        """Regression test: Check whether a test run with a successful test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")

    def test_failure(self):
        """Regression test: Check whether a test run with a failing test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                self.assert_(False)
        self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000">
<failure type="exceptions.AssertionError">Foobar</failure>
</testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")

    def test_error(self):
        """Regression test: Check whether a test run with a erroneous test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                raise IndexError()
        self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000">
<error type="exceptions.IndexError">Foobar</error>
</testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")

    def test_stdout_capture(self):
        """Regression test: Check whether a test run with output to stdout
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                print("Test")
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
<system-out><![CDATA[Test
]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")

    def test_stderr_capture(self):
        """Regression test: Check whether a test run with output to stderr
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr.write('Test\n')
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[Test
]]></system-err>
</testsuite>
""")

    class NullStream(object):
        """A file-like object that discards everything written to it."""
        def write(self, buffer):
            pass

    def test_unittests_changing_stdout(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stdout, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stdout = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))

    def test_unittests_changing_stderr(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stderr, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
class XMLTestProgram(unittest.TestProgram):
    """unittest.TestProgram variant that defaults to XMLTestRunner."""

    def runTests(self):
        # Install the XML runner only when the caller supplied none.
        if self.testRunner is None:
            self.testRunner = XMLTestRunner()
        unittest.TestProgram.runTests(self)
# Backwards-compatible alias mirroring unittest.main.
main = XMLTestProgram
if __name__ == "__main__":
    main(module=None)
| Python |
#!/usr/bin/env python
# -*- encoding: utf8 -*-
from Cheetah.Template import Template
from Cheetah import CheetahWrapper
from Cheetah import DummyTransaction
import imp
import os
import sys
import tempfile
import unittest
class CommandLineTest(unittest.TestCase):
    """Base class for tests that compile a template through the `cheetah
    compile` command line and import the generated module."""

    def createAndCompile(self, source):
        """Write 'source' to a temporary .tmpl file, compile it with
        CheetahWrapper, import the generated module, and return the
        template class defined in it."""
        sourcefile = '-'
        # mktemp() names containing '-' are not valid Python module names,
        # so retry until one without '-' comes back.
        # NOTE(review): mktemp() is race-prone; mkstemp would be safer --
        # kept as-is to preserve behavior.
        while sourcefile.find('-') != -1:
            sourcefile = tempfile.mktemp()
        fd = open('%s.tmpl' % sourcefile, 'w')
        fd.write(source)
        fd.close()
        wrap = CheetahWrapper.CheetahWrapper()
        wrap.main(['cheetah', 'compile', '--quiet', '--nobackup', sourcefile])
        module_path, module_name = os.path.split(sourcefile)
        module = loadModule(module_name, [module_path])
        template = getattr(module, module_name)
        return template
class JBQ_UTF8_Test1(unittest.TestCase):
    """Nested templates where both values are plain-ASCII unicode strings."""

    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        t.other = other
        t.v = u'Unicode String'
        t.other.v = u'Unicode String'
        assert unicode(t())  # renders without raising (Python 2 unicode)
class JBQ_UTF8_Test2(unittest.TestCase):
    """Nested templates where the outer value contains a non-ASCII char."""

    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        t.other = other
        t.v = u'Unicode String with eacute é'
        t.other.v = u'Unicode String'
        assert unicode(t())
class JBQ_UTF8_Test3(unittest.TestCase):
    """Nested templates where both values contain non-ASCII characters."""

    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        t.other = other
        t.v = u'Unicode String with eacute é'
        t.other.v = u'Unicode String and an eacute é'
        assert unicode(t())
class JBQ_UTF8_Test4(unittest.TestCase):
    """#encoding utf-8 template with a non-ASCII literal and a byte-string
    value."""

    def runTest(self):
        t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
        t.v = 'Unicode String'
        assert unicode(t())
class JBQ_UTF8_Test5(unittest.TestCase):
    """#encoding utf-8 template with a non-ASCII literal and a unicode
    value."""

    def runTest(self):
        t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
        t.v = u'Unicode String'
        assert unicode(t())
def loadModule(moduleName, path=None):
    """Import 'moduleName' (or fetch it from sys.modules), searching 'path'.

    Uses the 'imp' module (deprecated, removed in Python 3.12); the file
    handle returned by find_module is always closed.
    """
    if path:
        assert isinstance(path, list)
    try:
        # Already imported: reuse the cached module.
        mod = sys.modules[moduleName]
    except KeyError:
        fp = None
        try:
            fp, pathname, description = imp.find_module(moduleName, path)
            mod = imp.load_module(moduleName, fp, pathname, description)
        finally:
            if fp:
                fp.close()
    return mod
class JBQ_UTF8_Test6(unittest.TestCase):
    """#encoding utf-8 template that also #sets a non-ASCII unicode var."""

    def runTest(self):
        source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
        t = Template.compile(source=source)
        t.v = u'Unicode String'
        assert unicode(t())
class JBQ_UTF8_Test7(CommandLineTest):
    """Same as Test6, but compiled through the `cheetah compile` command."""

    def runTest(self):
        source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
        template = self.createAndCompile(source)
        template.v = u'Unicode String'
        assert unicode(template())
class JBQ_UTF8_Test8(CommandLineTest):
    """The UTF-8 literal must survive both static and dynamic compilation."""

    def testStaticCompile(self):
        """Command-line-compiled template renders the UTF-8 literal intact."""
        source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
        template = self.createAndCompile(source)()
        a = unicode(template).encode("utf-8")
        self.assertEquals("Bébé", a)

    def testDynamicCompile(self):
        """Dynamically compiled template renders the UTF-8 literal intact."""
        source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
        template = Template(source = source)
        a = unicode(template).encode("utf-8")
        self.assertEquals("Bébé", a)
class EncodeUnicodeCompatTest(unittest.TestCase):
    """
    Taken initially from Red Hat's bugzilla #529332
    https://bugzilla.redhat.com/show_bug.cgi?id=529332
    """
    def runTest(self):
        t = Template("""Foo ${var}""", filter='EncodeUnicode')
        t.var = u"Text with some non-ascii characters: åäö"
        # The EncodeUnicode filter must keep respond() returning unicode...
        rc = t.respond()
        assert isinstance(rc, unicode), ('Template.respond() should return unicode', rc)
        # ...while str() still yields an encoded byte string.
        rc = str(t)
        assert isinstance(rc, str), ('Template.__str__() should return a UTF-8 encoded string', rc)
class Unicode_in_SearchList_Test(CommandLineTest):
    """Unicode and UTF-8 byte-string values supplied via the searchList."""

    def test_BasicASCII(self):
        source = '''This is $adjective'''
        template = self.createAndCompile(source)
        assert template and issubclass(template, Template)
        template = template(searchList=[{'adjective' : u'neat'}])
        assert template.respond()

    def test_Thai(self):
        # The string is something in Thai
        source = '''This is $foo $adjective'''
        template = self.createAndCompile(source)
        assert template and issubclass(template, Template)
        template = template(searchList=[{'foo' : 'bar',
            'adjective' : u'\u0e22\u0e34\u0e19\u0e14\u0e35\u0e15\u0e49\u0e2d\u0e19\u0e23\u0e31\u0e1a'}])
        assert template.respond()

    def test_Thai_utf8(self):
        # Same Thai text as above, but as a UTF-8 encoded byte string.
        utf8 = '\xe0\xb8\xa2\xe0\xb8\xb4\xe0\xb8\x99\xe0\xb8\x94\xe0\xb8\xb5\xe0\xb8\x95\xe0\xb9\x89\xe0\xb8\xad\xe0\xb8\x99\xe0\xb8\xa3\xe0\xb8\xb1\xe0\xb8\x9a'
        source = '''This is $adjective'''
        template = self.createAndCompile(source)
        assert template and issubclass(template, Template)
        template = template(searchList=[{'adjective' : utf8}])
        assert template.respond()
class InlineSpanishTest(unittest.TestCase):
    """A template containing non-ASCII text ('pedidós') requires an
    #encoding directive; without it compilation must fail."""

    def setUp(self):
        super(InlineSpanishTest, self).setUp()
        # HTML page with a UTF-8 encoded accented character inline.
        self.template = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Pagina del vendedor</title>
</head>
<body>
$header
<h2>Bienvenido $nombre.</h2>
<br /><br /><br />
<center>
Usted tiene $numpedidos_noconf <a href="">pedidós</a> sin confirmar.
<br /><br />
Bodega tiene fecha para $numpedidos_bodega <a href="">pedidos</a>.
</center>
</body>
</html>
'''

    def test_failure(self):
        """ Test a template lacking a proper #encoding tag """
        self.failUnlessRaises(UnicodeDecodeError, Template, self.template, searchList=[{'header' : '',
            'nombre' : '', 'numpedidos_bodega' : '',
            'numpedidos_noconf' : ''}])

    def test_success(self):
        """ Test a template with a proper #encoding tag """
        template = '#encoding utf-8\n%s' % self.template
        template = Template(template, searchList=[{'header' : '',
            'nombre' : '', 'numpedidos_bodega' : '',
            'numpedidos_noconf' : ''}])
        self.assertTrue(unicode(template))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import sys
import unittest
import Cheetah.Template
import Cheetah.Filters
# Cache the interpreter's (major, minor) version for feature checks below.
majorVer = sys.version_info[0]
minorVer = sys.version_info[1]
versionTuple = (majorVer, minorVer)
class BasicMarkdownFilterTest(unittest.TestCase):
    '''
    Test that our markdown filter works
    '''
    def test_BasicHeader(self):
        template = '''
#from Cheetah.Filters import Markdown
#transform Markdown
$foo
Header
======
'''
        expected = '''<p>bar</p>
<h1>Header</h1>'''
        try:
            template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
            template = str(template)
            assert template == expected
        except ImportError, ex:
            # The markdown package is optional: skip rather than fail.
            print('>>> We probably failed to import markdown, bummer %s' % ex)
            return
        except Exception, ex:
            # markdown is known to be broken on Python < 2.5; tolerate it.
            if ex.__class__.__name__ == 'MarkdownException' and majorVer == 2 and minorVer < 5:
                print('>>> NOTE: Support for the Markdown filter will be broken for you. Markdown says: %s' % ex)
                return
            raise
class BasicCodeHighlighterFilterTest(unittest.TestCase):
    '''
    Test that our code highlighter filter works
    '''

    def _renderWithFoo(self, source):
        # Shared helper: fill the template with foo='bar' and render it.
        filled = Cheetah.Template.Template(source, searchList=[{'foo' : 'bar'}])
        return str(filled)

    def test_Python(self):
        source = '''
#from Cheetah.Filters import CodeHighlighter
#transform CodeHighlighter
def foo(self):
return '$foo'
'''
        rendered = self._renderWithFoo(source)
        assert rendered, (rendered, 'We should have some content here...')

    def test_Html(self):
        source = '''
#from Cheetah.Filters import CodeHighlighter
#transform CodeHighlighter
<html><head></head><body>$foo</body></html>
'''
        rendered = self._renderWithFoo(source)
        assert rendered, (rendered, 'We should have some content here...')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
from Cheetah import SettingsManager
class SettingsManagerTests(unittest.TestCase):
    """Tests for SettingsManager.mergeNestedDictionaries."""

    def test_mergeDictionaries(self):
        # Disjoint top-level keys: the merge is a plain union, with nested
        # dicts preserved.
        base = {'foo' : 'bar', 'abc' : {'a' : 1, 'b' : 2, 'c' : (3,)}}
        overlay = {'xyz' : (10, 9)}
        merged = SettingsManager.mergeNestedDictionaries(base, overlay)
        expected = {'xyz': (10, 9), 'foo': 'bar', 'abc': {'a': 1, 'c': (3,), 'b': 2}}
        self.assertEquals(merged, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
from Cheetah import Parser
class ArgListTest(unittest.TestCase):
    """Tests for Parser.ArgList argument/default accumulation and merge()."""

    def setUp(self):
        super(ArgListTest, self).setUp()
        # Fresh ArgList for every test.
        self.al = Parser.ArgList()

    def test_merge1(self):
        '''
        Testing the ArgList case results from Template.Preprocessors.test_complexUsage
        '''
        self.al.add_argument('arg')
        expect = [('arg', None)]
        self.assertEquals(expect, self.al.merge())

    def test_merge2(self):
        '''
        Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test4
        '''
        # Two arguments, each with a default; next() advances to the
        # second slot.
        self.al.add_argument('a')
        self.al.add_default('999')
        self.al.next()
        self.al.add_argument('b')
        self.al.add_default('444')
        expect = [(u'a', u'999'), (u'b', u'444')]
        self.assertEquals(expect, self.al.merge())

    def test_merge3(self):
        '''
        Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test13
        '''
        self.al.add_argument('arg')
        self.al.add_default("'This is my block'")
        expect = [('arg', "'This is my block'")]
        self.assertEquals(expect, self.al.merge())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
from Cheetah import DirectiveAnalyzer
class AnalyzerTests(unittest.TestCase):
    """Tests for DirectiveAnalyzer.analyze's directive counting."""

    def test_set(self):
        source = '''
#set $foo = "bar"
Hello ${foo}!
'''
        counts = DirectiveAnalyzer.analyze(source)
        # Exactly one #set directive appears in the template.
        self.assertEquals(1, counts.get('set'))

    def test_compilersettings(self):
        source = '''
#compiler-settings
useNameMapper = False
#end compiler-settings
'''
        counts = DirectiveAnalyzer.analyze(source)
        self.assertEquals(1, counts.get('compiler-settings'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
import Cheetah
import Cheetah.Parser
import Cheetah.Template
class Chep_2_Conditionalized_Import_Behavior(unittest.TestCase):
def test_ModuleLevelImport(self):
''' Verify module level (traditional) import behavior '''
pass
def test_InlineImport(self):
''' Verify (new) inline import behavior works '''
template = '''
#def funky($s)
#try
#import urllib
#except ImportError
#pass
#end try
#return urllib.quote($s)
#end def
'''
try:
template = Cheetah.Template.Template.compile(template)
except Cheetah.Parser.ParseError, ex:
self.fail('Failed to properly generate code %s' % ex)
template = template()
rc = tepmlate.funky('abc def')
assert rc == 'abc+def'
def test_LegacyMode(self):
''' Verify disabling of CHEP #2 works '''
pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import Cheetah.NameMapper
import Cheetah.Template
import sys
import unittest
# Cache the interpreter's (major, minor) version for feature checks below.
majorVer = sys.version_info[0]
minorVer = sys.version_info[1]
versionTuple = (majorVer, minorVer)

def isPython23():
    ''' Python 2.3 is still supported by Cheetah, but doesn't support decorators '''
    return versionTuple[0] == 2 and versionTuple[1] < 4
class GetAttrException(Exception):
    """Raised by CustomGetAttrClass on every attribute lookup."""
    pass

class CustomGetAttrClass(object):
    """Object whose attribute access always fails with GetAttrException."""

    def __getattr__(self, name):
        raise GetAttrException('FAIL, %s' % name)
class GetAttrTest(unittest.TestCase):
'''
Test for an issue occurring when __getatttr__() raises an exception
causing NameMapper to raise a NotFound exception
'''
def test_ValidException(self):
o = CustomGetAttrClass()
try:
print(o.attr)
except GetAttrException, e:
# expected
return
except:
self.fail('Invalid exception raised: %s' % e)
self.fail('Should have had an exception raised')
def test_NotFoundException(self):
template = '''
#def raiseme()
$obj.attr
#end def'''
template = Cheetah.Template.Template.compile(template, compilerSettings={}, keepRefToGeneratedCode=True)
template = template(searchList=[{'obj' : CustomGetAttrClass()}])
assert template, 'We should have a valid template object by now'
self.failUnlessRaises(GetAttrException, template.raiseme)
class InlineImportTest(unittest.TestCase):
    """Regression tests for inline #import / #from handling
    (useLegacyImportMode=False)."""

    def test_FromFooImportThing(self):
        '''
        Verify that a bug introduced in v2.1.0 where an inline:
        #from module import class
        would result in the following code being generated:
        import class
        '''
        template = '''
#def myfunction()
#if True
#from os import path
#return 17
Hello!
#end if
#end def
'''
        template = Cheetah.Template.Template.compile(template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
        template = template(searchList=[{}])
        assert template, 'We should have a valid template object by now'
        rc = template.myfunction()
        assert rc == 17, (template, 'Didn\'t get a proper return value')

    def test_ImportFailModule(self):
        """An inline #import that fails is recoverable with #try/#except."""
        template = '''
#try
#import invalidmodule
#except
#set invalidmodule = dict(FOO='BAR!')
#end try
$invalidmodule.FOO
'''
        template = Cheetah.Template.Template.compile(template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
        template = template(searchList=[{}])
        assert template, 'We should have a valid template object by now'
        assert str(template), 'We weren\'t able to properly generate the result from the template'

    def test_ProperImportOfBadModule(self):
        """A top-level #from of a missing module raises ImportError at
        compile time."""
        template = '''
#from invalid import fail
This should totally $fail
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)

    def test_AutoImporting(self):
        """#extends of an unknown base class raises ImportError at compile
        time."""
        template = '''
#extends FakeyTemplate
Boo!
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template)

    def test_StuffBeforeImport_Legacy(self):
        """Comments before #extends must be tolerated in legacy import
        mode (still failing on the unknown base with ImportError)."""
        template = '''
###
### I like comments before import
###
#extends Foo
Bar
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template, compilerSettings={'useLegacyImportMode' : True}, keepRefToGeneratedCode=True)
class Mantis_Issue_11_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in Mantis issue #11:
    Output:
    Traceback (most recent call last):
    File "test.py", line 12, in <module>
    t.respond()
    File "DynamicallyCompiledCheetahTemplate.py", line 86, in respond
    File "/usr/lib64/python2.6/cgi.py", line 1035, in escape
    s = s.replace("&", "&") # Must be done first!
    '''
    def test_FailingBehavior(self):
        # Without prioritizeSearchListOverSelf, respond() raises
        # AttributeError (the issue's traceback above).
        import cgi
        template = Cheetah.Template.Template("$escape($request)", searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}])
        assert template
        self.failUnlessRaises(AttributeError, template.respond)

    def test_FailingBehaviorWithSetting(self):
        # With prioritizeSearchListOverSelf the searchList values win and
        # rendering succeeds.
        import cgi
        template = Cheetah.Template.Template("$escape($request)",
            searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}],
            compilerSettings={'prioritizeSearchListOverSelf' : True})
        assert template
        assert template.respond()
class Mantis_Issue_21_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in issue #21

    Effectively @staticmethod and @classmethod
    decorated methods in templates don't
    properly define the _filter local, which breaks
    when using the NameMapper
    '''
    def runTest(self):
        if isPython23():
            # Skipped on Python 2.3 -- presumably because the #@staticmethod
            # decorator syntax needs 2.4+; verify against isPython23()'s use
            # elsewhere in the suite.
            return
        template = '''
#@staticmethod
#def testMethod()
This is my $output
#end def
'''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug') # raises a NameError: global name '_filter' is not defined
class Mantis_Issue_22_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in issue #22

    When using @staticmethod and @classmethod
    in conjunction with the #filter directive
    the generated code for the #filter is reliant
    on the `self` local, breaking the function
    '''
    def test_NoneFilter(self):
        # XXX: Disabling this test for now
        # (the early return below makes the rest of the body unreachable).
        return
        if isPython23():
            return
        template = '''
#@staticmethod
#def testMethod()
#filter None
This is my $output
#end filter
#end def
'''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug')

    def test_DefinedFilter(self):
        # XXX: Disabling this test for now
        # (the early return below makes the rest of the body unreachable).
        return
        if isPython23():
            return
        template = '''
#@staticmethod
#def testMethod()
#filter Filter
This is my $output
#end filter
#end def
'''
        # The generated code for the template's testMethod() should look something
        # like this in the 'error' case:
        '''
        @staticmethod
        def testMethod(**KWS):
            ## CHEETAH: generated from #def testMethod() at line 3, col 13.
            trans = DummyTransaction()
            _dummyTrans = True
            write = trans.response().write
            SL = [KWS]
            _filter = lambda x, **kwargs: unicode(x)

            ########################################
            ## START - generated method body

            _orig_filter_18517345 = _filter
            filterName = u'Filter'
            if self._CHEETAH__filters.has_key("Filter"):
                _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
            else:
                _filter = self._CHEETAH__currentFilter = \\
                        self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
            write(u'            This is my ')
            _v = VFFSL(SL,"output",True) # u'$output' on line 5, col 32
            if _v is not None: write(_filter(_v, rawExpr=u'$output')) # from line 5, col 32.

            ########################################
            ## END - generated method body

            return _dummyTrans and trans.response().getvalue() or ""
        '''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug')
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
'''
Tests for the 'cheetah' command.
Besides unittest usage, recognizes the following command-line options:
--list CheetahWrapper.py
List all scenarios that are tested. The argument is the path
of this script.
--nodelete
Don't delete scratch directory at end.
--output
Show the output of each subcommand. (Normally suppressed.)
'''
import os
import os.path
import pdb
import re # Used by listTests.
import shutil
import sys
import tempfile
import unittest
from optparse import OptionParser
from Cheetah.CheetahWrapper import CheetahWrapper # Used by NoBackup.
# Compatibility shim: prefer the modern subprocess module, emulating the
# legacy popen2.Popen4 interface (combined stdout/stderr plus the
# tochild/fromchild/childerr file attributes) on top of subprocess.Popen.
# On Pythons without subprocess, fall back to the real popen2.Popen4.
try:
    from subprocess import Popen, PIPE, STDOUT

    class Popen4(Popen):
        def __init__(self, cmd, bufsize=-1, shell=True, close_fds=True,
                stdin=PIPE, stdout=PIPE, stderr=STDOUT, **kwargs):
            super(Popen4, self).__init__(cmd, bufsize=bufsize, shell=shell,
                    close_fds=close_fds, stdin=stdin, stdout=stdout,
                    stderr=stderr, **kwargs)
            # Aliases matching the old popen2.Popen4 attribute names.
            self.tochild = self.stdin
            self.fromchild = self.stdout
            self.childerr = self.stderr
except ImportError:
    from popen2 import Popen4
DELETE = True # True to clean up after ourselves, False for debugging.
OUTPUT = False # Normally False, True for debugging.
BACKUP_SUFFIX = CheetahWrapper.BACKUP_SUFFIX
def warn(msg):
    """Write *msg* plus a trailing newline to standard error."""
    sys.stderr.write(''.join([msg, '\n']))
class CFBase(unittest.TestCase):
    """Base class for "cheetah compile" and "cheetah fill" unit tests.

    setUp() builds a scratch directory tree of .tmpl files and chdirs into
    it; tearDown() restores the cwd and (unless DELETE is off) removes the
    tree.  Subclasses drive the `cheetah` command line via go() and check
    the produced files with the check*() helpers.
    """
    srcDir = '' # Nonblank to create source directory.
    subdirs = ('child', 'child/grandkid') # Delete in reverse order.
    srcFiles = ('a.tmpl', 'child/a.tmpl', 'child/grandkid/a.tmpl')
    expectError = False # Used by --list option.

    def inform(self, message):
        # Print only in verbose mode (self.verbose is set externally --
        # TODO confirm by the test driver).
        if self.verbose:
            print(message)

    def setUp(self):
        """Create the top-level directories, subdirectories and .tmpl
        files.
        """
        I = self.inform
        # Step 1: Create the scratch directory and chdir into it.
        # mkdtemp() creates the directory atomically; the original
        # mktemp() + os.mkdir() pair was subject to a filename race.
        self.scratchDir = scratchDir = tempfile.mkdtemp()
        self.origCwd = os.getcwd()
        os.chdir(scratchDir)
        if self.srcDir:
            os.mkdir(self.srcDir)
        # Step 2: Create source subdirectories.
        for dir in self.subdirs:
            os.mkdir(dir)
        # Step 3: Create the .tmpl files, each in its proper directory.
        for fil in self.srcFiles:
            f = open(fil, 'w')
            f.write("Hello, world!\n")
            f.close()

    def tearDown(self):
        os.chdir(self.origCwd)
        if DELETE:
            shutil.rmtree(self.scratchDir, True) # Ignore errors.
            if os.path.exists(self.scratchDir):
                # Bug fix: the original never interpolated the %s
                # placeholder, so the literal text "%s" was printed.
                warn("Warning: unable to delete scratch directory %s"
                     % self.scratchDir)
        else:
            warn("Warning: not deleting scratch directory %s" % self.scratchDir)

    def _checkDestFileHelper(self, path, expected,
                             allowSurroundingText, errmsg):
        """Low-level helper to check a destination file.

        in : path, string, the destination path.
             expected, string, the expected contents.
             allowSurroundingtext, bool, allow the result to contain
               additional text around the 'expected' substring?
             errmsg, string, the error message.  It may contain the
               following "%"-operator keys: path, expected, result.
        out: None
        """
        path = os.path.abspath(path)
        exists = os.path.exists(path)
        msg = "destination file missing: %s" % path
        self.failUnless(exists, msg)
        f = open(path, 'r')
        result = f.read()
        f.close()
        if allowSurroundingText:
            success = result.find(expected) != -1
        else:
            success = result == expected
        # errmsg pulls path/expected/result out of the local namespace.
        msg = errmsg % locals()
        self.failUnless(success, msg)

    def checkCompile(self, path):
        # Raw string to prevent "\n" from being converted to a newline.
        #expected = R"write('Hello, world!\n')"
        expected = "Hello, world!" # might output a u'' string
        errmsg = """\
destination file %(path)s doesn't contain expected substring:
%(expected)r"""
        self._checkDestFileHelper(path, expected, True, errmsg)

    def checkFill(self, path):
        expected = "Hello, world!\n"
        errmsg = """\
destination file %(path)s contains wrong result.
Expected %(expected)r
Found %(result)r"""
        self._checkDestFileHelper(path, expected, False, errmsg)

    def checkSubdirPyInit(self, path):
        """Verify a destination subdirectory exists and contains an
        __init__.py file.
        """
        exists = os.path.exists(path)
        msg = "destination subdirectory %s misssing" % path
        self.failUnless(exists, msg)
        initPath = os.path.join(path, "__init__.py")
        exists = os.path.exists(initPath)
        msg = "destination init file missing: %s" % initPath
        self.failUnless(exists, msg)

    def checkNoBackup(self, path):
        """Verify 'path' does not exist.  (To check --nobackup.)
        """
        exists = os.path.exists(path)
        msg = "backup file exists in spite of --nobackup: %s" % path
        self.failIf(exists, msg)

    def locate_command(self, cmd):
        """Resolve the first word of *cmd* against $PATH; return the
        command string with the executable replaced by its full path,
        or unchanged when no match is found (or PATH is empty).
        """
        paths = os.getenv('PATH')
        if not paths:
            return cmd
        parts = cmd.split(' ')
        paths = paths.split(':')
        for p in paths:
            p = p + os.path.sep + parts[0]
            if os.path.isfile(p):
                return ' '.join([p] + parts[1:])
        return ' '.join(parts)

    def assertWin32Subprocess(self, cmd):
        # NOTE(review): os.popen4 is a Python 2-only API.
        _in, _out = os.popen4(cmd)
        _in.close()
        output = _out.read()
        rc = _out.close()
        if rc is None:
            rc = 0
        return rc, output

    def assertPosixSubprocess(self, cmd):
        cmd = self.locate_command(cmd)
        process = Popen4(cmd, env=os.environ)
        process.tochild.close()
        output = process.fromchild.read()
        status = process.wait()
        process.fromchild.close()
        return status, output

    def assertSubprocess(self, cmd, nonzero=False):
        """Run *cmd*, asserting a zero exit status (or nonzero when
        nonzero=True); return the command's combined output.
        """
        status, output = None, None
        if sys.platform == 'win32':
            status, output = self.assertWin32Subprocess(cmd)
        else:
            status, output = self.assertPosixSubprocess(cmd)
        if not nonzero:
            self.failUnlessEqual(status, 0, '''Subprocess exited with a non-zero status (%d)
%s''' % (status, output))
        else:
            self.failIfEqual(status, 0, '''Subprocess exited with a zero status (%d)
%s''' % (status, output))
        return output

    def go(self, cmd, expectedStatus=0, expectedOutputSubstring=None):
        """Run a "cheetah compile" or "cheetah fill" subcommand.

        in : cmd, string, the command to run.
             expectedStatus, int, subcommand's expected output status.
               0 if the subcommand is expected to succeed, 1-255 otherwise.
             expectedOutputSubstring, string, substring which much appear
               in the standard output or standard error.  None to skip this
               test.
        out: None.
        """
        output = self.assertSubprocess(cmd)
        if expectedOutputSubstring is not None:
            msg = "substring %r not found in subcommand output: %s" % \
                (expectedOutputSubstring, cmd)
            substringTest = output.find(expectedOutputSubstring) != -1
            self.failUnless(substringTest, msg)
class CFIdirBase(CFBase):
    """Subclass for tests with --idir.
    """
    # Source templates live under SRC/ instead of the scratch-dir root.
    srcDir = 'SRC'
    subdirs = ('SRC/child', 'SRC/child/grandkid') # Delete in reverse order.
    srcFiles = ('SRC/a.tmpl', 'SRC/child/a.tmpl', 'SRC/child/grandkid/a.tmpl')
##################################################
## TEST CASE CLASSES
class OneFile(CFBase):
    """Compile/fill a single template given with its .tmpl extension."""
    def testCompile(self):
        self.go("cheetah compile a.tmpl")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill a.tmpl")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a.tmpl")
        self.checkFill("a.txt")
class OneFileNoExtension(CFBase):
    """Compile/fill a single template given without its .tmpl extension."""
    def testCompile(self):
        self.go("cheetah compile a")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill a")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a")
        self.checkFill("a.txt")
class SplatTmpl(CFBase):
    """Compile/fill templates selected by a *.tmpl shell glob."""
    def testCompile(self):
        self.go("cheetah compile *.tmpl")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill *.tmpl")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --oext txt *.tmpl")
        self.checkFill("a.txt")
class ThreeFilesWithSubdirectories(CFBase):
    """Explicitly list three templates spread over nested subdirectories."""
    def testCompile(self):
        self.go("cheetah compile a.tmpl child/a.tmpl child/grandkid/a.tmpl")
        self.checkCompile("a.py")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill a.tmpl child/a.tmpl child/grandkid/a.tmpl")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a.tmpl child/a.tmpl child/grandkid/a.tmpl")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class ThreeFilesWithSubdirectoriesNoExtension(CFBase):
    """Same as ThreeFilesWithSubdirectories but without .tmpl extensions."""
    def testCompile(self):
        self.go("cheetah compile a child/a child/grandkid/a")
        self.checkCompile("a.py")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill a child/a child/grandkid/a")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a child/a child/grandkid/a")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class SplatTmplWithSubdirectories(CFBase):
    """Select templates with per-directory *.tmpl globs."""
    def testCompile(self):
        self.go("cheetah compile *.tmpl child/*.tmpl child/grandkid/*.tmpl")
        self.checkCompile("a.py")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill *.tmpl child/*.tmpl child/grandkid/*.tmpl")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --oext txt *.tmpl child/*.tmpl child/grandkid/*.tmpl")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class OneFileWithOdir(CFBase):
    """Route output into a separate directory with --odir."""
    def testCompile(self):
        self.go("cheetah compile --odir DEST a.tmpl")
        # Compiling into --odir should also create a package __init__.py.
        self.checkSubdirPyInit("DEST")
        self.checkCompile("DEST/a.py")
    def testFill(self):
        self.go("cheetah fill --odir DEST a.tmpl")
        self.checkFill("DEST/a.html")
    def testText(self):
        self.go("cheetah fill --odir DEST --oext txt a.tmpl")
        self.checkFill("DEST/a.txt")
class VarietyWithOdir(CFBase):
    """Mix extensioned, extensionless and globbed arguments with --odir."""
    def testCompile(self):
        self.go("cheetah compile --odir DEST a.tmpl child/a child/grandkid/*.tmpl")
        self.checkSubdirPyInit("DEST")
        self.checkSubdirPyInit("DEST/child")
        self.checkSubdirPyInit("DEST/child/grandkid")
        self.checkCompile("DEST/a.py")
        self.checkCompile("DEST/child/a.py")
        self.checkCompile("DEST/child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill --odir DEST a.tmpl child/a child/grandkid/*.tmpl")
        self.checkFill("DEST/a.html")
        self.checkFill("DEST/child/a.html")
        self.checkFill("DEST/child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --odir DEST --oext txt a.tmpl child/a child/grandkid/*.tmpl")
        self.checkFill("DEST/a.txt")
        self.checkFill("DEST/child/a.txt")
        self.checkFill("DEST/child/grandkid/a.txt")
class RecurseExplicit(CFBase):
    """Recurse (-R) into an explicitly named subdirectory."""
    def testCompile(self):
        self.go("cheetah compile -R child")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R child")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --oext txt child")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class RecurseImplicit(CFBase):
    """Recurse (-R) from the current directory when no argument is given."""
    def testCompile(self):
        self.go("cheetah compile -R")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --oext txt")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class RecurseExplicitWIthOdir(CFBase):
    """Combine -R with --odir.  (Class name typo 'WIth' kept: it is the
    public identifier of this scenario.)"""
    def testCompile(self):
        self.go("cheetah compile -R --odir DEST child")
        self.checkSubdirPyInit("DEST/child")
        self.checkSubdirPyInit("DEST/child/grandkid")
        self.checkCompile("DEST/child/a.py")
        self.checkCompile("DEST/child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R --odir DEST child")
        self.checkFill("DEST/child/a.html")
        self.checkFill("DEST/child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --odir DEST --oext txt child")
        self.checkFill("DEST/child/a.txt")
        self.checkFill("DEST/child/grandkid/a.txt")
class Flat(CFBase):
    """--flat drops the source subdirectory from the output path."""
    def testCompile(self):
        self.go("cheetah compile --flat child/a.tmpl")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill --flat child/a.tmpl")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --flat --oext txt child/a.tmpl")
        self.checkFill("a.txt")
class FlatRecurseCollision(CFBase):
    """-R plus --flat maps several a.tmpl files onto one output name, so
    the command must exit with a nonzero status."""
    expectError = True
    def testCompile(self):
        self.assertSubprocess("cheetah compile -R --flat", nonzero=True)
    def testFill(self):
        self.assertSubprocess("cheetah fill -R --flat", nonzero=True)
    def testText(self):
        self.assertSubprocess("cheetah fill -R --flat", nonzero=True)
class IdirRecurse(CFIdirBase):
def testCompile(self):
self.go("cheetah compile -R --idir SRC child")
self.checkSubdirPyInit("child")
self.checkSubdirPyInit("child/grandkid")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill -R --idir SRC child")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill -R --idir SRC --oext txt child")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class IdirOdirRecurse(CFIdirBase):
    """Recurse with both --idir and --odir."""
    def testCompile(self):
        self.go("cheetah compile -R --idir SRC --odir DEST child")
        self.checkSubdirPyInit("DEST/child")
        self.checkSubdirPyInit("DEST/child/grandkid")
        self.checkCompile("DEST/child/a.py")
        self.checkCompile("DEST/child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R --idir SRC --odir DEST child")
        self.checkFill("DEST/child/a.html")
        self.checkFill("DEST/child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --idir SRC --odir DEST --oext txt child")
        self.checkFill("DEST/child/a.txt")
        self.checkFill("DEST/child/grandkid/a.txt")
class IdirFlatRecurseCollision(CFIdirBase):
    """-R plus --flat under --idir also collides and must fail."""
    expectError = True
    def testCompile(self):
        self.assertSubprocess("cheetah compile -R --flat --idir SRC", nonzero=True)
    def testFill(self):
        self.assertSubprocess("cheetah fill -R --flat --idir SRC", nonzero=True)
    def testText(self):
        self.assertSubprocess("cheetah fill -R --flat --idir SRC --oext txt", nonzero=True)
class NoBackup(CFBase):
    """Run the command twice each time and verify a backup file is
    *not* created.
    """
    def testCompile(self):
        self.go("cheetah compile --nobackup a.tmpl")
        # Second run would normally create a backup of the first output.
        self.go("cheetah compile --nobackup a.tmpl")
        self.checkNoBackup("a.py" + BACKUP_SUFFIX)
    def testFill(self):
        self.go("cheetah fill --nobackup a.tmpl")
        self.go("cheetah fill --nobackup a.tmpl")
        self.checkNoBackup("a.html" + BACKUP_SUFFIX)
    def testText(self):
        self.go("cheetah fill --nobackup --oext txt a.tmpl")
        self.go("cheetah fill --nobackup --oext txt a.tmpl")
        self.checkNoBackup("a.txt" + BACKUP_SUFFIX)
def listTests(cheetahWrapperFile):
    """cheetahWrapperFile, string, path of this script.

    Print, one per line, the quoted argument of every self.go("...")
    call found in the file.

    XXX TODO: don't print test where expectError is true.
    """
    rx = re.compile( R'self\.go\("(.*?)"\)' )
    f = open(cheetahWrapperFile)
    try:
        # Iterate the file object directly instead of the original
        # while/readline loop; the try/finally guarantees the handle is
        # closed even if reading or printing raises.
        for lin in f:
            m = rx.search(lin)
            if m:
                print(m.group(1))
    finally:
        f.close()
def main():
    """Parse script-specific options, then either list the tested commands
    (--list) or hand the remaining arguments over to unittest.main().
    """
    global DELETE, OUTPUT
    parser = OptionParser()
    parser.add_option("--list", action="store", dest="listTests")
    parser.add_option("--nodelete", action="store_true")
    parser.add_option("--output", action="store_true")
    # The following options are passed to unittest.
    parser.add_option("-e", "--explain", action="store_true")
    parser.add_option("-v", "--verbose", action="store_true")
    parser.add_option("-q", "--quiet", action="store_true")
    opts, files = parser.parse_args()
    if opts.nodelete:
        DELETE = False
    if opts.output:
        OUTPUT = True
    if opts.listTests:
        listTests(opts.listTests)
    else:
        # Eliminate script-specific command-line arguments to prevent
        # errors in unittest.
        del sys.argv[1:]
        for opt in ("explain", "verbose", "quiet"):
            if getattr(opts, opt):
                sys.argv.append("--" + opt)
        sys.argv.extend(files)
        unittest.main()
if __name__ == '__main__':
main()
# vim: sw=4 ts=4 expandtab
| Python |
#
| Python |
#!/usr/bin/env python
import hotshot
import hotshot.stats
import os
import sys
import unittest
from test import pystone
import time
import Cheetah.NameMapper
import Cheetah.Template
# This can be turned on with the `--debug` flag when running the test
# and will cause the tests to all just dump out how long they took
# insteasd of asserting on duration
DEBUG = False
# TOLERANCE in Pystones
kPS = 1000
TOLERANCE = 0.5*kPS
class DurationError(AssertionError):
    """Raised when a profiled test exceeds its pystone budget."""
    pass
# Cached pystone calibration result so the benchmark runs at most once.
_pystone_calibration_mark = None
def _pystone_calibration():
    """Run (and memoize) the pystone benchmark used to normalize timings."""
    global _pystone_calibration_mark
    if not _pystone_calibration_mark:
        _pystone_calibration_mark = pystone.pystones(loops=pystone.LOOPS)
    return _pystone_calibration_mark
def perftest(max_num_pystones, current_pystone=None):
    '''
    Performance test decorator based off the 'timedtest'
    decorator found in this Active State recipe:
        http://code.activestate.com/recipes/440700/
    '''
    if not isinstance(max_num_pystones, float):
        max_num_pystones = float(max_num_pystones)

    if not current_pystone:
        # Calibrate once per process; see _pystone_calibration().
        current_pystone = _pystone_calibration()

    def _test(function):
        def wrapper(*args, **kw):
            start_time = time.time()
            try:
                return function(*args, **kw)
            finally:
                total_time = time.time() - start_time
                if total_time == 0:
                    pystone_total_time = 0
                else:
                    # Convert wall-clock seconds into pystones via the
                    # calibration pair returned by pystone.pystones().
                    pystone_rate = current_pystone[0] / current_pystone[1]
                    pystone_total_time = total_time / pystone_rate
                global DEBUG
                if DEBUG:
                    # NOTE(review): function.func_name is the Python 2
                    # spelling; Python 3 would need function.__name__.
                    print('The test "%s" took: %s pystones' % (function.func_name,
                        pystone_total_time))
                else:
                    if pystone_total_time > (max_num_pystones + TOLERANCE):
                        raise DurationError((('Test too long (%.2f Ps, '
                                            'need at most %.2f Ps)')
                                            % (pystone_total_time,
                                                max_num_pystones)))
        return wrapper
    return _test
class DynamicTemplatePerformanceTest(unittest.TestCase):
    """Check that repeated dynamic compilation stays within its pystone budget."""
    loops = 10  # number of compile iterations per run
    #@perftest(1200)
    def test_BasicDynamic(self):
        template = '''
#def foo(arg1, arg2)
#pass
#end def
'''
        for i in range(self.loops):
            klass = Cheetah.Template.Template.compile(template)
            assert klass
    # Decorator applied manually; the #@perftest syntax above is commented
    # out, presumably to keep the file importable on pre-2.4 Pythons.
    test_BasicDynamic = perftest(1200)(test_BasicDynamic)
class PerformanceTest(unittest.TestCase):
    """Profile a subclass's performanceSample() under hotshot.

    NOTE(review): hotshot is a Python 2-only stdlib module.
    """
    iterations = 100000  # times performanceSample() is invoked
    display = False      # print profiler stats after the run?
    save = False         # keep the .prof file on disk afterwards?
    def runTest(self):
        self.prof = hotshot.Profile('%s.prof' % self.__class__.__name__)
        self.prof.start()
        for i in range(self.iterations):
            if hasattr(self, 'performanceSample'):
                self.display = True
                self.performanceSample()
        self.prof.stop()
        self.prof.close()
        if self.display:
            print('>>> %s (%d iterations) ' % (self.__class__.__name__,
                self.iterations))
            stats = hotshot.stats.load('%s.prof' % self.__class__.__name__)
            #stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(50)
        if not self.save:
            os.unlink('%s.prof' % self.__class__.__name__)
class DynamicMethodCompilationTest(PerformanceTest):
    """Profile compiling and invoking a template-defined method."""
    def performanceSample(self):
        template = '''
#import sys
#import os
#def testMethod()
#set foo = [1, 2, 3, 4]
#return $foo[0]
#end def
'''
        template = Cheetah.Template.Template.compile(template,
            keepRefToGeneratedCode=False)
        template = template()
        value = template.testMethod()
class BunchOfWriteCalls(PerformanceTest):
    """Profile a template whose body triggers many buffer writes."""
    iterations = 1000  # fewer iterations: each sample is heavy
    def performanceSample(self):
        template = '''
#import sys
#import os
#for i in range(1000)
$i
#end for
'''
        template = Cheetah.Template.Template.compile(template,
            keepRefToGeneratedCode=False)
        template = template()
        value = template.respond()
        del value
class DynamicSimpleCompilationTest(PerformanceTest):
    """Profile compile-plus-render of a small, simple template."""
    def performanceSample(self):
        template = '''
#import sys
#import os
#set foo = [1,2,3,4]
Well hello there! This is basic.
Here's an array too: $foo
'''
        template = Cheetah.Template.Template.compile(template,
            keepRefToGeneratedCode=False)
        template = template()
        # NOTE(review): unicode() is Python 2-only.
        template = unicode(template)
class FilterTest(PerformanceTest):
    """Profile only the render step: the template is compiled once in setUp()."""
    template = None  # compiled template instance, built in setUp()
    def setUp(self):
        super(FilterTest, self).setUp()
        template = '''
#import sys
#import os
#set foo = [1, 2, 3, 4]
$foo, $foo, $foo
'''
        template = Cheetah.Template.Template.compile(template,
            keepRefToGeneratedCode=False)
        self.template = template()
    def performanceSample(self):
        value = unicode(self.template)
class LongCompileTest(PerformanceTest):
    ''' Test the compilation on a sufficiently large template '''
    def compile(self, template):
        # Hook point: subclasses override to pass different compiler settings.
        return Cheetah.Template.Template.compile(template, keepRefToGeneratedCode=False)

    def performanceSample(self):
        template = '''
#import sys
#import Cheetah.Template
#extends Cheetah.Template.Template
#def header()
<center><h2>This is my header</h2></center>
#end def
#def footer()
#return "Huzzah"
#end def
#def scripts()
#pass
#end def
#def respond()
<html>
<head>
<title>${title}</title>
$scripts()
</head>
<body>
$header()
#for $i in $range(10)
This is just some stupid page!
<br/>
#end for
<br/>
$footer()
</body>
</html>
#end def
'''
        return self.compile(template)
class LongCompile_CompilerSettingsTest(LongCompileTest):
    """Same large-template compile, but with extra compiler settings enabled."""
    def compile(self, template):
        return Cheetah.Template.Template.compile(template, keepRefToGeneratedCode=False,
            compilerSettings={'useStackFrames' : True, 'useAutocalling' : True})
class LongCompileAndRun(LongCompileTest):
    """Compile the large template, then also instantiate and render it."""
    def performanceSample(self):
        template = super(LongCompileAndRun, self).performanceSample()
        template = template(searchList=[{'title' : 'foo'}])
        template = template.respond()
if __name__ == '__main__':
    # Strip the script-specific --debug flag before unittest parses argv.
    if '--debug' in sys.argv:
        DEBUG = True
        # Idiom fix: use != instead of the awkward `not arg == ...`.
        sys.argv = [arg for arg in sys.argv if arg != '--debug']
    unittest.main()
| Python |
#!/usr/bin/env python
import sys
import types
import os
import os.path
import unittest
from Cheetah.NameMapper import NotFound, valueForKey, \
valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList
class DummyClass:
    """Fixture offering class vars, instance vars and methods for the
    NameMapper lookup tests.  The exact shapes of these methods matter:
    the tests assert on their return values and raised exceptions."""
    classVar1 = 123
    def __init__(self):
        self.instanceVar1 = 123
    def __str__(self):
        return 'object'
    def meth(self, arg="arff"):
        return str(arg)
    def meth1(self, arg="doo"):
        return arg
    def meth2(self, arg1="a1", arg2="a2"):
        # Always raises; used to verify exceptions from autocalled
        # methods propagate out of the NameMapper.
        raise ValueError
    def meth3(self):
        """Tests a bug that Jeff Johnson reported on Oct 1, 2001"""
        x = 'A string'
        try:
            for i in [1, 2, 3, 4]:
                if x == 2:
                    pass
                if x == 'xx':
                    pass
            return x
        except:
            raise
def dummyFunc(arg="Scooby"):
    """Identity function; its default return value exercises autocalling."""
    return arg
def funcThatRaises():
    """Always raises ValueError; checks exception propagation through lookups."""
    raise ValueError
# Flat and nested fixture values looked up by the tests below.
testNamespace = {
    'aStr': 'blarg',
    'anInt': 1,
    'aFloat': 1.5,
    'aDict': {'one': 'item1',
              'two': 'item2',
              'nestedDict': {'one': 'nestedItem1',
                             'two': 'nestedItem2',
                             'funcThatRaises': funcThatRaises,
                             'aClass': DummyClass,
                             },
              'nestedFunc': dummyFunc,
              },
    'aClass': DummyClass,
    'aFunc': dummyFunc,
    'anObj': DummyClass(),
    'aMeth': DummyClass().meth1,
    'none': None,
    'emptyString': '',
    'funcThatRaises': funcThatRaises,
    }

# Expected values for callables when autocalling is enabled.
autoCallResults = {'aFunc': 'Scooby',
                   'aMeth': 'doo',
                   }

# Expected results keyed by (possibly dotted) lookup name.
results = testNamespace.copy()
results.update({'anObj.meth1': 'doo',
                'aDict.one': 'item1',
                'aDict.nestedDict': testNamespace['aDict']['nestedDict'],
                'aDict.nestedDict.one': 'nestedItem1',
                'aDict.nestedDict.aClass': DummyClass,
                'aDict.nestedFunc': 'Scooby',
                'aClass.classVar1': 123,
                'anObj.instanceVar1': 123,
                'anObj.meth3': 'A string',
                })

for k in testNamespace.keys():
    # put them in the globals for the valueFromFrame tests
    exec('%s = testNamespace[k]'%k)
##################################################
## TEST BASE CLASSES
class NameMapperTest(unittest.TestCase):
    """Common machinery for the lookup tests; subclasses override get()."""
    # A NotFound from the NameMapper counts as a test failure too.
    failureException = (NotFound, AssertionError)
    _testNamespace = testNamespace
    _results = results

    def namespace(self):
        return self._testNamespace

    def VFN(self, name, autocall=True):
        return valueForName(self.namespace(), name, autocall)

    def VFS(self, searchList, name, autocall=True):
        return valueFromSearchList(searchList, name, autocall)

    # alias to be overriden later
    get = VFN

    def check(self, name):
        """Assert that looking up *name* yields the recorded expected value."""
        got = self.get(name)
        if name in autoCallResults:
            expected = autoCallResults[name]
        else:
            expected = self._results[name]
        assert got == expected
##################################################
## TEST CASE CLASSES
class VFN(NameMapperTest):
    """Exercise valueForName() (the default get()) over the shared
    namespace: plain keys, dotted paths, autocalling on and off, and
    exception propagation.

    Bug fix: the original defined test17 and test18 twice with identical
    bodies; the shadowed duplicates have been removed (class behavior is
    unchanged -- the later definitions were the ones that ran).
    """
    def test1(self):
        """string in dict lookup"""
        self.check('aStr')

    def test2(self):
        """string in dict lookup in a loop"""
        for i in range(10):
            self.check('aStr')

    def test3(self):
        """int in dict lookup"""
        self.check('anInt')

    def test4(self):
        """int in dict lookup in a loop"""
        for i in range(10):
            self.check('anInt')

    def test5(self):
        """float in dict lookup"""
        self.check('aFloat')

    def test6(self):
        """float in dict lookup in a loop"""
        for i in range(10):
            self.check('aFloat')

    def test7(self):
        """class in dict lookup"""
        self.check('aClass')

    def test8(self):
        """class in dict lookup in a loop"""
        for i in range(10):
            self.check('aClass')

    def test9(self):
        """aFunc in dict lookup"""
        self.check('aFunc')

    def test10(self):
        """aFunc in dict lookup in a loop"""
        for i in range(10):
            self.check('aFunc')

    def test11(self):
        """aMeth in dict lookup"""
        self.check('aMeth')

    def test12(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('aMeth')

    def test13(self):
        """aMeth in dict lookup"""
        self.check('aMeth')

    def test14(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('aMeth')

    def test15(self):
        """anObj in dict lookup"""
        self.check('anObj')

    def test16(self):
        """anObj in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj')

    def test17(self):
        """aDict in dict lookup"""
        self.check('aDict')

    def test18(self):
        """aDict in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict')

    def test19(self):
        """aClass.classVar1 in dict lookup"""
        self.check('aClass.classVar1')

    def test20(self):
        """aClass.classVar1 in dict lookup in a loop"""
        for i in range(10):
            self.check('aClass.classVar1')

    def test23(self):
        """anObj.instanceVar1 in dict lookup"""
        self.check('anObj.instanceVar1')

    def test24(self):
        """anObj.instanceVar1 in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.instanceVar1')

    ## tests 22, 25, and 26 removed when the underscored lookup was removed

    def test27(self):
        """anObj.meth1 in dict lookup"""
        self.check('anObj.meth1')

    def test28(self):
        """anObj.meth1 in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.meth1')

    def test29(self):
        """aDict.one in dict lookup"""
        self.check('aDict.one')

    def test30(self):
        """aDict.one in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.one')

    def test31(self):
        """aDict.nestedDict in dict lookup"""
        self.check('aDict.nestedDict')

    def test32(self):
        """aDict.nestedDict in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict')

    def test33(self):
        """aDict.nestedDict.one in dict lookup"""
        self.check('aDict.nestedDict.one')

    def test34(self):
        """aDict.nestedDict.one in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict.one')

    def test35(self):
        """aDict.nestedFunc in dict lookup"""
        self.check('aDict.nestedFunc')

    def test36(self):
        """aDict.nestedFunc in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedFunc')

    def test37(self):
        """aDict.nestedFunc in dict lookup - without autocalling"""
        assert self.get('aDict.nestedFunc', False) == dummyFunc

    def test38(self):
        """aDict.nestedFunc in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aDict.nestedFunc', False) == dummyFunc

    def test39(self):
        """aMeth in dict lookup - without autocalling"""
        assert self.get('aMeth', False) == self.namespace()['aMeth']

    def test40(self):
        """aMeth in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aMeth', False) == self.namespace()['aMeth']

    def test41(self):
        """anObj.meth3 in dict lookup"""
        self.check('anObj.meth3')

    def test42(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.meth3')

    def test43(self):
        """NotFound test"""
        def test(self=self):
            self.get('anObj.methX')
        self.assertRaises(NotFound, test)

    def test44(self):
        """NotFound test in a loop"""
        def test(self=self):
            self.get('anObj.methX')
        for i in range(10):
            self.assertRaises(NotFound, test)

    def test45(self):
        """Other exception from meth test"""
        def test(self=self):
            self.get('anObj.meth2')
        self.assertRaises(ValueError, test)

    def test46(self):
        """Other exception from meth test in a loop"""
        def test(self=self):
            self.get('anObj.meth2')
        for i in range(10):
            self.assertRaises(ValueError, test)

    def test47(self):
        """None in dict lookup"""
        self.check('none')

    def test48(self):
        """None in dict lookup in a loop"""
        for i in range(10):
            self.check('none')

    def test49(self):
        """EmptyString in dict lookup"""
        self.check('emptyString')

    def test50(self):
        """EmptyString in dict lookup in a loop"""
        for i in range(10):
            self.check('emptyString')

    def test51(self):
        """Other exception from func test"""
        def test(self=self):
            self.get('funcThatRaises')
        self.assertRaises(ValueError, test)

    def test52(self):
        """Other exception from func test in a loop"""
        def test(self=self):
            self.get('funcThatRaises')
        for i in range(10):
            self.assertRaises(ValueError, test)

    def test53(self):
        """Other exception from func test"""
        def test(self=self):
            self.get('aDict.nestedDict.funcThatRaises')
        self.assertRaises(ValueError, test)

    def test54(self):
        """Other exception from func test in a loop"""
        def test(self=self):
            self.get('aDict.nestedDict.funcThatRaises')
        for i in range(10):
            self.assertRaises(ValueError, test)

    def test55(self):
        """aDict.nestedDict.aClass in dict lookup"""
        self.check('aDict.nestedDict.aClass')

    def test56(self):
        """aDict.nestedDict.aClass in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict.aClass')

    def test57(self):
        """aDict.nestedDict.aClass in dict lookup - without autocalling"""
        assert self.get('aDict.nestedDict.aClass', False) == DummyClass

    def test58(self):
        """aDict.nestedDict.aClass in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aDict.nestedDict.aClass', False) == DummyClass

    def test59(self):
        """Other exception from func test -- but without autocalling shouldn't raise"""
        self.get('aDict.nestedDict.funcThatRaises', False)

    def test60(self):
        """Other exception from func test in a loop -- but without autocalling shouldn't raise"""
        for i in range(10):
            self.get('aDict.nestedDict.funcThatRaises', False)
class VFS(VFN):
    """Runs the shared lookup tests through valueFromSearchList(),
    against search lists of configurable length and container type."""
    _searchListLength = 1
    def searchList(self):
        size = self._searchListLength
        if size == 1:
            return [self.namespace()]
        if size == 2:
            return [self.namespace(), {'dummy':1234}]
        if size == 3:
            # a tuple for kicks
            return ({'dummy':1234}, self.namespace(), {'dummy':1234})
        if size == 4:
            # a generator for more kicks
            return self.searchListGenerator()
    def searchListGenerator(self):
        class Test:
            pass
        namespaces = [Test(), {'dummy':1234}, self.namespace(), {'dummy':1234}]
        for ns in namespaces:
            yield ns
    def get(self, name, autocall=True):
        return self.VFS(self.searchList(), name, autocall)
class VFS_2namespaces(VFS):
    # same tests, with a two-element searchList
    _searchListLength = 2
class VFS_3namespaces(VFS):
    # three namespaces, supplied as a tuple (see VFS.searchList)
    _searchListLength = 3
class VFS_4namespaces(VFS):
    # four namespaces, supplied as a generator (see VFS.searchList)
    _searchListLength = 4
class VFF(VFN):
    # Runs the shared tests through valueFromFrame(), which inspects the
    # *calling frame's* locals/globals.  The seemingly unused locals in
    # get() below (aStr, aFloat, none) are a deliberate part of the
    # fixture -- do not remove them.
    def get(self, name, autocall=True):
        ns = self._testNamespace
        aStr = ns['aStr']
        aFloat = ns['aFloat']
        none = 'some'
        return valueFromFrame(name, autocall)
    def setUp(self):
        """Mod some of the data
        """
        # copy the shared fixtures so mutations stay local to this case
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'
        res['True'] = True
        res['False'] = False
        res['None'] = None
        res['eval'] = eval
    def test_VFF_1(self):
        """Builtins"""
        self.check('True')
        self.check('None')
        self.check('False')
        assert self.get('eval', False)==eval
        assert self.get('range', False)==range
class VFFSL(VFS):
    # Runs the shared tests through valueFromFrameOrSearchList(), which
    # searches the calling frame's locals first, then the searchList,
    # then globals and builtins.
    _searchListLength = 1
    def setUp(self):
        """Mod some of the data
        """
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'
        del ns['anInt'] # will be picked up by globals
    def VFFSL(self, searchList, name, autocall=True):
        # NOTE: the locals below are part of the fixture -- this frame's
        # locals are searched before the searchList
        anInt = 1
        none = 'some'
        return valueFromFrameOrSearchList(searchList, name, autocall)
    def get(self, name, autocall=True):
        return self.VFFSL(self.searchList(), name, autocall)
class VFFSL_2(VFFSL):
    # two namespaces in the searchList
    _searchListLength = 2
class VFFSL_3(VFFSL):
    # three namespaces (tuple)
    _searchListLength = 3
class VFFSL_4(VFFSL):
    # four namespaces (generator)
    _searchListLength = 4
if sys.platform.startswith('java'):
    # Jython lacks the CPython frame internals that the valueFromFrame*
    # functions rely on, so drop the frame-based test cases there
    del VFF, VFFSL, VFFSL_2, VFFSL_3, VFFSL_4
##################################################
## if run from the command line ##
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import pdb
import sys
import types
import os
import os.path
import tempfile
import shutil
import unittest
from Cheetah.Template import Template
# Interpreter version cached as a comparable (major, minor) tuple; used by
# the version-gated tests below.
majorVer, minorVer = sys.version_info[0], sys.version_info[1]
versionTuple = (majorVer, minorVer)
class TemplateTest(unittest.TestCase):
    # common base class for all the test cases in this module
    pass
class ClassMethods_compile(TemplateTest):
    """I am using the same Cheetah source for each test to root out clashes
    caused by the compile caching in Template.compile().
    """
    def test_basicUsage(self):
        klass = Template.compile(source='$foo')
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
    def test_baseclassArg(self):
        # baseclass may be a builtin type, a compiled template class, or a
        # string naming a resolvable class
        klass = Template.compile(source='$foo', baseclass=dict)
        t = klass({'foo':1234})
        assert str(t)=='1234'
        klass2 = Template.compile(source='$foo', baseclass=klass)
        t = klass2({'foo':1234})
        assert str(t)=='1234'
        # '#implements dummy' means klass3 inherits klass2's main method
        klass3 = Template.compile(source='#implements dummy\n$bar', baseclass=klass2)
        t = klass3({'foo':1234})
        assert str(t)=='1234'
        klass4 = Template.compile(source='$foo', baseclass='dict')
        t = klass4({'foo':1234})
        assert str(t)=='1234'
    def test_moduleFileCaching(self):
        # module files on disk improve tracebacks for generated code
        if versionTuple < (2, 3):
            return
        tmpDir = tempfile.mkdtemp()
        try:
            #print tmpDir
            assert os.path.exists(tmpDir)
            klass = Template.compile(source='$foo',
                                     cacheModuleFilesForTracebacks=True,
                                     cacheDirForModuleFiles=tmpDir)
            mod = sys.modules[klass.__module__]
            #print mod.__file__
            assert os.path.exists(mod.__file__)
            assert os.path.dirname(mod.__file__)==tmpDir
        finally:
            shutil.rmtree(tmpDir, True)
    def test_classNameArg(self):
        klass = Template.compile(source='$foo', className='foo123')
        assert klass.__name__=='foo123'
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
    def test_moduleNameArg(self):
        # with no explicit className, the class name defaults to moduleName
        klass = Template.compile(source='$foo', moduleName='foo99')
        mod = sys.modules['foo99']
        assert klass.__name__=='foo99'
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
        klass = Template.compile(source='$foo',
                                 moduleName='foo1',
                                 className='foo2')
        mod = sys.modules['foo1']
        assert klass.__name__=='foo2'
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
    def test_mainMethodNameArg(self):
        # the main method should be reachable both via str() and by name
        klass = Template.compile(source='$foo',
                                 className='foo123',
                                 mainMethodName='testMeth')
        assert klass.__name__=='foo123'
        t = klass(namespaces={'foo':1234})
        #print t.generatedClassCode()
        assert str(t)=='1234'
        assert t.testMeth()=='1234'
        klass = Template.compile(source='$foo',
                                 moduleName='fooXXX',
                                 className='foo123',
                                 mainMethodName='testMeth',
                                 baseclass=dict)
        assert klass.__name__=='foo123'
        t = klass({'foo':1234})
        #print t.generatedClassCode()
        assert str(t)=='1234'
        assert t.testMeth()=='1234'
    def test_moduleGlobalsArg(self):
        # moduleGlobals entries are visible both as $placeholders and as
        # names usable by the baseclass string
        klass = Template.compile(source='$foo',
                                 moduleGlobals={'foo':1234})
        t = klass()
        assert str(t)=='1234'
        klass2 = Template.compile(source='$foo', baseclass='Test1',
                                  moduleGlobals={'Test1':dict})
        t = klass2({'foo':1234})
        assert str(t)=='1234'
        klass3 = Template.compile(source='$foo', baseclass='Test1',
                                  moduleGlobals={'Test1':dict, 'foo':1234})
        t = klass3()
        assert str(t)=='1234'
    def test_keepRefToGeneratedCodeArg(self):
        # NOTE: the three compiles share className='unique58' on purpose to
        # probe interactions with the compilation cache
        klass = Template.compile(source='$foo',
                                 className='unique58',
                                 cacheCompilationResults=False,
                                 keepRefToGeneratedCode=False)
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
        assert not t.generatedModuleCode()
        klass2 = Template.compile(source='$foo',
                                  className='unique58',
                                  keepRefToGeneratedCode=True)
        t = klass2(namespaces={'foo':1234})
        assert str(t)=='1234'
        assert t.generatedModuleCode()
        klass3 = Template.compile(source='$foo',
                                  className='unique58',
                                  keepRefToGeneratedCode=False)
        t = klass3(namespaces={'foo':1234})
        assert str(t)=='1234'
        # still there as this class came from the cache
        assert t.generatedModuleCode()
    def test_compilationCache(self):
        klass = Template.compile(source='$foo',
                                 className='unique111',
                                 cacheCompilationResults=False)
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
        assert not klass._CHEETAH_isInCompilationCache
        # this time it will place it in the cache
        klass = Template.compile(source='$foo',
                                 className='unique111',
                                 cacheCompilationResults=True)
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
        assert klass._CHEETAH_isInCompilationCache
        # by default it will be in the cache
        klass = Template.compile(source='$foo',
                                 className='unique999099')
        t = klass(namespaces={'foo':1234})
        assert str(t)=='1234'
        assert klass._CHEETAH_isInCompilationCache
class ClassMethods_subclass(TemplateTest):
    """Exercises Template.subclass(), the shortcut for compiling a new
    template class derived from an existing compiled one."""
    def test_basicUsage(self):
        base = Template.compile(source='$foo', baseclass=dict)
        assert str(base({'foo':1234})) == '1234'
        child = base.subclass(source='$foo')
        assert str(child({'foo':1234})) == '1234'
        # '#implements dummy' -> grandchild reuses child's main method
        grandchild = child.subclass(source='#implements dummy\n$bar')
        assert str(grandchild({'foo':1234})) == '1234'
class Preprocessors(TemplateTest):
    """Tests for the `preprocessors` argument to Template.compile().
    Preprocessors run over the raw source (here using '@'/'%' tokens)
    before the normal Cheetah compilation pass."""
    def test_basicUsage1(self):
        src='''\
        %set foo = @a
        $(@foo*10)
        @a'''
        # normalize the indentation of the inline template source
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        preprocessors = {'tokens':'@ %',
                         'namespaces':{'a':99}
                         }
        klass = Template.compile(src, preprocessors=preprocessors)
        assert str(klass())=='990\n99'
    def test_normalizePreprocessorArgVariants(self):
        # every supported style of preprocessor arg should normalize to
        # equivalent settings and produce the same output
        src='%set foo = 12\n%%comment\n$(@foo*10)'
        class Settings1: tokens = '@ %'
        Settings1 = Settings1()
        from Cheetah.Template import TemplatePreprocessor
        settings = Template._normalizePreprocessorSettings(Settings1)
        preprocObj = TemplatePreprocessor(settings)
        def preprocFunc(source, file):
            return '$(12*10)', None
        class TemplateSubclass(Template):
            pass
        compilerSettings = {'cheetahVarStartToken': '@',
                            'directiveStartToken': '%',
                            'commentStartToken': '%%',
                            }
        for arg in ['@ %',
                    {'tokens':'@ %'},
                    {'compilerSettings':compilerSettings},
                    {'compilerSettings':compilerSettings,
                     'templateInitArgs':{}},
                    {'tokens':'@ %',
                     'templateAPIClass':TemplateSubclass},
                    Settings1,
                    preprocObj,
                    preprocFunc,
                    ]:
            klass = Template.compile(src, preprocessors=arg)
            assert str(klass())=='120'
    def test_complexUsage(self):
        src='''\
        %set foo = @a
        %def func1: #def func(arg): $arg("***")
        %% comment
        $(@foo*10)
        @func1
        $func(lambda x:c"--$x--@a")'''
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        for arg in [{'tokens':'@ %', 'namespaces':{'a':99} },
                    {'tokens':'@ %', 'namespaces':{'a':99} },
                    ]:
            klass = Template.compile(src, preprocessors=arg)
            t = klass()
            assert str(t)=='990\n--***--99'
    def test_i18n(self):
        # %i18n currently passes the text straight through (no catalogs)
        src='''\
        %i18n: This is a $string that needs translation
        %i18n id="foo", domain="root": This is a $string that needs translation
        '''
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        klass = Template.compile(src, preprocessors='@ %', baseclass=dict)
        t = klass({'string':'bit of text'})
        #print str(t), repr(str(t))
        assert str(t)==('This is a bit of text that needs translation\n'*2)[:-1]
class TryExceptImportTest(TemplateTest):
    def test_FailCase(self):
        ''' Test situation where an inline #import statement will get relocated '''
        source = '''
            #def myFunction()
                Ahoy!
                #try
                    #import sys
                #except ImportError
                    $print "This will never happen!"
                #end try
            #end def
            '''
        # This should raise an IndentationError (if the bug exists)
        klass = Template.compile(source=source, compilerSettings={'useLegacyImportMode' : False})
        t = klass(namespaces={'foo' : 1234})
class ClassMethodSupport(TemplateTest):
    # Verifies that the #@classmethod decorator directive produces a real
    # classmethod on the compiled template class.
    def test_BasicDecorator(self):
        if sys.version_info[0] == 2 and sys.version_info[1] == 3:
            print('This version of Python doesn\'t support decorators, skipping tests')
            return
        template = '''
            #@classmethod
            #def myClassMethod()
                #return '$foo = %s' % $foo
            #end def
        '''
        template = Template.compile(source=template)
        try:
            # called on the class itself, not an instance
            rc = template.myClassMethod(foo='bar')
            assert rc == '$foo = bar', (rc, 'Template class method didn\'t return what I expected')
        except AttributeError, ex:
            self.fail(ex)
class StaticMethodSupport(TemplateTest):
    # Same as ClassMethodSupport, but for the #@staticmethod directive.
    def test_BasicDecorator(self):
        if sys.version_info[0] == 2 and sys.version_info[1] == 3:
            print('This version of Python doesn\'t support decorators, skipping tests')
            return
        template = '''
            #@staticmethod
            #def myStaticMethod()
                #return '$foo = %s' % $foo
            #end def
        '''
        template = Template.compile(source=template)
        try:
            rc = template.myStaticMethod(foo='bar')
            assert rc == '$foo = bar', (rc, 'Template class method didn\'t return what I expected')
        except AttributeError, ex:
            self.fail(ex)
class Useless(object):
    """Trivial mixin used by MultipleInheritanceSupport; boink() supplies
    extra list items for the template's foo() method to append."""
    def boink(self):
        values = [1, 2, 3]
        return values
class MultipleInheritanceSupport(TemplateTest):
    def runTest(self):
        # #extends with two bases: Template plus a plain mixin.  The mixin
        # must be supplied via moduleGlobals because the automatic import
        # for #extends is turned off in compilerSettings.
        template = '''
            #extends Template, Useless
            #def foo()
                #return [4,5] + $boink()
            #end def
        '''
        template = Template.compile(template,
                                    moduleGlobals={'Useless' : Useless},
                                    compilerSettings={'autoImportForExtendsDirective' : False})
        template = template()
        result = template.foo()
        assert result == [4, 5, 1, 2, 3], (result, 'Unexpected result')
##################################################
## if run from the command line ##
# allows running this test module directly: `python <thisfile>`
if __name__ == '__main__':
    unittest.main()
| Python |
import Cheetah.Template
def render(template_file, **kwargs):
    '''
    Render a Cheetah template for Django and return an HttpResponse.

    `template_file` is the template filename; it must be resolvable by
    Django's template loader (i.e. live in one of your TEMPLATE_DIRS).
    Any additional keyword arguments are propagated into the template's
    searchList, so they are directly accessible as $placeholders.
    '''
    # imported lazily so importing this module doesn't require Django
    import django.http
    import django.template.loader
    # NOTE(review): find_template_source() was deprecated/removed in newer
    # Django releases -- confirm against the Django version in use
    source, loader = django.template.loader.find_template_source(template_file)
    t = Cheetah.Template.Template(source, searchList=[kwargs])
    # str(t) runs the template's main method; prefer it over calling
    # t.__str__() directly
    return django.http.HttpResponse(str(t))
| Python |
# $Id: NameMapper.py,v 1.32 2007/12/10 19:20:09 tavis_rudd Exp $
"""This module supports Cheetah's optional NameMapper syntax.
Overview
================================================================================
NameMapper provides a simple syntax for accessing Python data structures,
functions, and methods from Cheetah. It's called NameMapper because it 'maps'
simple 'names' in Cheetah templates to possibly more complex syntax in Python.
Its purpose is to make working with Cheetah easy for non-programmers.
Specifically, non-programmers using Cheetah should NOT need to be taught (a)
what the difference is between an object and a dictionary, (b) what functions
and methods are, and (c) what 'self' is. A further aim (d) is to buffer the
code in Cheetah templates from changes in the implementation of the Python data
structures behind them.
Consider this scenario:
You are building a customer information system. The designers with you want to
use information from your system on the client's website --AND-- they want to
understand the display code so that they can maintain it themselves.
You write a UI class with a 'customers' method that returns a dictionary of all
the customer objects. Each customer object has an 'address' method that returns
a dictionary with information about the customer's address. The designers
want to be able to access that information.
Using PSP, the display code for the website would look something like the
following, assuming your servlet subclasses the class you created for managing
customer information:
<%= self.customer()[ID].address()['city'] %> (42 chars)
Using Cheetah's NameMapper syntax it could be any of the following:
$self.customers()[$ID].address()['city'] (39 chars)
--OR--
$customers()[$ID].address()['city']
--OR--
$customers()[$ID].address().city
--OR--
$customers()[$ID].address.city
--OR--
$customers()[$ID].address.city
--OR--
$customers[$ID].address.city (27 chars)
Which of these would you prefer to explain to the designers, who have no
programming experience? The last form is 15 characters shorter than the PSP
and, conceptually, is far more accessible. With PHP or ASP, the code would be
even messier than the PSP
This is a rather extreme example and, of course, you could also just implement
'$getCustomer($ID).city' and obey the Law of Demeter (search Google for more on that).
But good object orientated design isn't the point here.
Details
================================================================================
The parenthesized letters below correspond to the aims in the second paragraph.
DICTIONARY ACCESS (a)
---------------------
NameMapper allows access to items in a dictionary using the same dotted notation
used to access object attributes in Python. This aspect of NameMapper is known
as 'Unified Dotted Notation'.
For example, with Cheetah it is possible to write:
$customers()['kerr'].address() --OR-- $customers().kerr.address()
where the second form is in NameMapper syntax.
This only works with dictionary keys that are also valid python identifiers:
regex = '[a-zA-Z_][a-zA-Z_0-9]*'
AUTOCALLING (b,d)
-----------------
NameMapper automatically detects functions and methods in Cheetah $vars and calls
them if the parentheses have been left off.
For example if 'a' is an object, 'b' is a method
$a.b
is equivalent to
$a.b()
If b returns a dictionary, then following variations are possible
$a.b.c --OR-- $a.b().c --OR-- $a.b()['c']
where 'c' is a key in the dictionary that a.b() returns.
Further notes:
* NameMapper autocalls the function or method without any arguments. Thus
autocalling can only be used with functions or methods that either have no
arguments or have default values for all arguments.
* NameMapper only autocalls functions and methods. Classes and callable object instances
will not be autocalled.
* Autocalling can be disabled using Cheetah's 'useAutocalling' setting.
LEAVING OUT 'self' (c,d)
------------------------
NameMapper makes it possible to access the attributes of a servlet in Cheetah
without needing to include 'self' in the variable names. See the NAMESPACE
CASCADING section below for details.
NAMESPACE CASCADING (d)
--------------------
...
Implementation details
================================================================================
* NameMapper's search order is dictionary keys then object attributes
* NameMapper.NotFound is raised if a value can't be found for a name.
Performance and the C version
================================================================================
Cheetah comes with both a C version and a Python version of NameMapper. The C
version is significantly faster and the exception tracebacks are much easier to
read. It's still slower than standard Python syntax, but you won't notice the
difference in realistic usage scenarios.
Cheetah uses the optimized C version (_namemapper.c) if it has
been compiled or falls back to the Python version if not.
Meta-Data
================================================================================
Authors: Tavis Rudd <tavis@damnsimple.com>,
Chuck Esterbrook <echuck@mindspring.com>
Version: $Revision: 1.32 $
Start Date: 2001/04/03
Last Revision Date: $Date: 2007/12/10 19:20:09 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>," +\
"\nChuck Esterbrook <echuck@mindspring.com>"
__revision__ = "$Revision: 1.32 $"[11:-2]
import types
from types import StringType, InstanceType, ClassType, TypeType
from pprint import pformat
import inspect
import pdb
# Behavior switches for NotFound exceptions:
_INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS = False  # embed a repr of the namespace in messages
_ALLOW_WRAPPING_OF_NOTFOUND_EXCEPTIONS = True  # append the full dotted name to messages
# the public API of this module
__all__ = ['NotFound',
           'hasKey',
           'valueForKey',
           'valueForName',
           'valueFromSearchList',
           'valueFromFrameOrSearchList',
           'valueFromFrame',
           ]
if not hasattr(inspect.imp, 'get_suffixes'):
    # This is to fix broken behavior of the inspect module under the
    # Google App Engine, see the following issue:
    # http://bugs.communitycheetah.org/view.php?id=10
    # (GAE strips imp.get_suffixes; give inspect a minimal stand-in)
    setattr(inspect.imp, 'get_suffixes', lambda: [('.py', 'U', 1)])
## N.B. An attempt is made at the end of this module to import C versions of
## these functions. If _namemapper.c has been compiled succesfully and the
## import goes smoothly, the Python versions defined here will be replaced with
## the C versions.
class NotFound(LookupError):
    """Raised when NameMapper cannot resolve a name in any namespace."""
    pass
def _raiseNotFoundException(key, namespace):
    """Raise NotFound for `key`, optionally embedding a repr of the
    namespace that was searched (see the module-level switch)."""
    message = "cannot find '%s'" % key
    if _INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS:
        message += ' in the namespace %s' % pformat(namespace)
    raise NotFound(message)
def _wrapNotFoundException(exc, fullName, namespace):
    # Re-raise `exc` (must be called from inside an except block), first
    # appending the full dotted name being searched -- and optionally the
    # namespace repr -- to its message, so tracebacks show which lookup
    # failed.  Mutates exc.args in place; the bare `raise` preserves the
    # original traceback.
    if not _ALLOW_WRAPPING_OF_NOTFOUND_EXCEPTIONS:
        raise
    else:
        excStr = exc.args[0]
        if excStr.find('while searching')==-1: # only wrap once!
            excStr +=" while searching for '%s'"%fullName
            if _INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS:
                excStr += ' in the namespace %s'%pformat(namespace)
            exc.args = (excStr,)
        raise
def _isInstanceOrClass(obj):
    """Return True when `obj` is a class or an instance (old- or
    new-style), False for functions/methods/builtin callables.

    Used to decide autocalling: classes and instances must NOT be
    autocalled, even if they are callable.
    """
    if type(obj) in (InstanceType, ClassType):
        # oldstyle
        return True
    if hasattr(obj, "__class__"):
        # newstyle
        if hasattr(obj, 'mro'):
            # type/class
            return True
        elif (hasattr(obj, 'im_func') or hasattr(obj, 'func_code') or hasattr(obj, '__self__')):
            # method, func, or builtin func
            return False
        elif hasattr(obj, '__init__'):
            # instance
            return True
    return False
def hasKey(obj, key):
    """Return True when `key` is a mapping key of `obj` or one of its
    attributes."""
    looksLikeMapping = hasattr(obj, 'has_key')
    if looksLikeMapping and key in obj:
        return True
    return hasattr(obj, key)
def valueForKey(obj, key):
    """Look `key` up on `obj`: mapping entry first, then attribute.
    Raises NotFound when neither exists."""
    if hasattr(obj, 'has_key') and key in obj:
        return obj[key]
    if hasattr(obj, key):
        return getattr(obj, key)
    _raiseNotFoundException(key, obj)
def _valueForName(obj, name, executeCallables=False):
nameChunks=name.split('.')
for i in range(len(nameChunks)):
key = nameChunks[i]
## BEGIN HACK for getattr() first, then 'has_key':
try:
nextObj = getattr(obj, key)
except AttributeError:
try:
nextObj = obj[key]
except TypeError:
_raiseNotFoundException(key, obj)
## END HACK
## BEGIN ORIGINAL CODE
#if hasattr(obj, 'has_key') and key in obj:
# nextObj = obj[key]
#else:
# try:
# nextObj = getattr(obj, key)
# except AttributeError:
# _raiseNotFoundException(key, obj)
## END ORIGINAL CODE
if executeCallables and hasattr(nextObj, '__call__') and not _isInstanceOrClass(nextObj):
obj = nextObj()
else:
obj = nextObj
return obj
def valueForName(obj, name, executeCallables=False):
    """Public dotted-name lookup: delegates to _valueForName() and, on
    failure, re-raises NotFound with the full name appended."""
    try:
        return _valueForName(obj, name, executeCallables)
    except NotFound, e:
        _wrapNotFoundException(e, fullName=name, namespace=obj)
def valueFromSearchList(searchList, name, executeCallables=False):
    """Scan each namespace in `searchList` for the first chunk of the
    dotted `name`; resolve the full name against the first namespace
    that has it.  Raises NotFound when no namespace matches."""
    firstKey = name.split('.')[0]
    for namespace in searchList:
        if hasKey(namespace, firstKey):
            return _valueForName(namespace, name,
                                 executeCallables=executeCallables)
    _raiseNotFoundException(firstKey, searchList)
def _namespaces(callerFrame, searchList=None):
yield callerFrame.f_locals
if searchList:
for namespace in searchList:
yield namespace
yield callerFrame.f_globals
yield __builtins__
def valueFromFrameOrSearchList(searchList, name, executeCallables=False,
                               frame=None):
    """Like valueFromSearchList(), but also searches the caller's local
    and global scopes and builtins -- see _namespaces() for the order.

    NOTE: inspect.stack()[1] grabs the *immediate caller's* frame, so do
    not wrap this function in another helper without passing `frame`
    explicitly.
    """
    def __valueForName():
        try:
            return _valueForName(namespace, name, executeCallables=executeCallables)
        except NotFound, e:
            _wrapNotFoundException(e, fullName=name, namespace=searchList)
    try:
        if not frame:
            frame = inspect.stack()[1][0]
        key = name.split('.')[0]
        for namespace in _namespaces(frame, searchList):
            if hasKey(namespace, key):
                return __valueForName()
        _raiseNotFoundException(key, searchList)
    finally:
        del frame  # drop the frame reference promptly to avoid reference cycles
def valueFromFrame(name, executeCallables=False, frame=None):
    """Resolve `name` using only the caller's frame (locals, globals,
    builtins) -- i.e. with no searchList."""
    # @@TR consider implementing the C version the same way
    # at the moment it provides a separate but mirror implementation
    # to valueFromFrameOrSearchList
    try:
        if not frame:
            frame = inspect.stack()[1][0]
        return valueFromFrameOrSearchList(searchList=None,
                                          name=name,
                                          executeCallables=executeCallables,
                                          frame=frame)
    finally:
        del frame  # avoid keeping the frame alive via this scope
def hasName(obj, name):
    #Not in the C version
    """Return True when the full dotted `name` resolves against `obj`."""
    # cheap check on the first chunk before attempting the full lookup
    if not hasKey(obj, name.split('.')[0]):
        return False
    try:
        valueForName(obj, name)
    except NotFound:
        return False
    return True
try:
    # Use the accelerated C implementations when _namemapper has been
    # compiled; they shadow the pure-Python versions defined above.
    from _namemapper import NotFound, valueForKey, valueForName, \
         valueFromSearchList, valueFromFrameOrSearchList, valueFromFrame
    # it is possible with Jython or Windows, for example, that _namemapper.c hasn't been compiled
    C_VERSION = True
except ImportError:
    # narrowed from a bare `except:` so that real errors (KeyboardInterrupt,
    # SystemExit, genuine bugs) are no longer silently swallowed
    C_VERSION = False
##################################################
## CLASSES
class Mixin:
    """Mix-in that lets a class expose NameMapper-style lookups on
    itself, e.g. obj.valueForName('a.b.c')."""
    def valueForName(self, name):
        # delegate to the module-level function, using self as the namespace
        return valueForName(self, name)
    def valueForKey(self, key):
        # delegate to the module-level function, using self as the namespace
        return valueForKey(self, key)
##################################################
## if run from the command line ##
def example():
    """Small interactive demo of NameMapper lookups (run this module
    directly to see the output)."""
    class A(Mixin):
        classVar = 'classVar val'
        def method(self,arg='method 1 default arg'):
            return arg
        def method2(self, arg='meth 2 default arg'):
            return {'item1':arg}
        def method3(self, arg='meth 3 default'):
            return arg
    class B(A):
        classBvar = 'classBvar val'
    a = A()
    a.one = 'valueForOne'
    def function(whichOne='default'):
        values = {
            'default': 'default output',
            'one': 'output option one',
            'two': 'output option two'
            }
        return values[whichOne]
    a.dic = {
        'func': function,
        'method': a.method3,
        'item': 'itemval',
        'subDict': {'nestedMethod':a.method3}
        }
    b = 'this is local b'
    # dictionary and dotted-name lookups, with and without autocalling
    print(valueForKey(a.dic, 'subDict'))
    print(valueForName(a, 'dic.item'))
    print(valueForName(vars(), 'b'))
    print(valueForName(__builtins__, 'dir')())
    print(valueForName(vars(), 'a.classVar'))
    print(valueForName(vars(), 'a.dic.func', executeCallables=True))
    print(valueForName(vars(), 'a.method2.item1', executeCallables=True))
if __name__ == '__main__':
    example()
| Python |
'''
Provides the core API for Cheetah.
See the docstring in the Template class and the Users' Guide for more information
'''
################################################################################
## DEPENDENCIES
import sys # used in the error handling code
import re # used to define the internal delims regex
import new # used to bind methods and create dummy modules
import logging
import string
import os.path
import time # used in the cache refresh code
from random import randrange
import imp
import inspect
import StringIO
import traceback
import pprint
import cgi # Used by .webInput() if the template is a CGI script.
import types
from types import StringType, ClassType
try:
from types import StringTypes
except ImportError:
StringTypes = (types.StringType, types.UnicodeType)
try:
from threading import Lock
except ImportError:
class Lock:
def acquire(self):
pass
def release(self):
pass
try:
x = set()
except NameError:
# Python 2.3 compatibility
from sets import Set as set
from Cheetah.Version import convertVersionStringToTuple, MinCompatibleVersionTuple
from Cheetah.Version import MinCompatibleVersion
# Base classes for Template
from Cheetah.Servlet import Servlet
# More intra-package imports ...
from Cheetah.Parser import ParseError, SourceReader
from Cheetah.Compiler import Compiler, DEFAULT_COMPILER_SETTINGS
from Cheetah import ErrorCatchers # for placeholder tags
from Cheetah import Filters # the output filters
from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName
from Cheetah.Utils.Misc import checkKeywords # Used in Template.__init__
from Cheetah.Utils.Indenter import Indenter # Used in Template.__init__ and for
# placeholders
from Cheetah.NameMapper import NotFound, valueFromSearchList
from Cheetah.CacheStore import MemoryCacheStore, MemcachedCacheStore
from Cheetah.CacheRegion import CacheRegion
from Cheetah.Utils.WebInputMixin import _Converter, _lookup, NonNumericInputError
from Cheetah.Unspecified import Unspecified
# Decide whether to use the file modification time in file's cache key
__checkFileMtime = True
def checkFileMtime(value):
    """Enable/disable inclusion of file modification times in the
    compile-cache key (module-level switch)."""
    globals()['__checkFileMtime'] = value
class Error(Exception):
    """Base class for errors raised by this module."""
    pass
class PreprocessError(Error):
    """Raised when a template preprocessor fails or is misconfigured."""
    pass
def hashList(l):
    """Return a stable hash for a list by hashing it as a tuple,
    recursively converting nested dicts/lists (which are unhashable)."""
    def convert(value):
        if isinstance(value, dict):
            return hashDict(value)
        if isinstance(value, list):
            return hashList(value)
        return value
    return hash(tuple(convert(v) for v in l))
def hashDict(d):
    """Return a stable hash for a dict by hashing its sorted (key, value)
    items, recursively converting nested dicts/lists first."""
    def convert(value):
        if isinstance(value, dict):
            return hashDict(value)
        if isinstance(value, list):
            return hashList(value)
        return value
    return hash(tuple((k, convert(v)) for k, v in sorted(d.items())))
################################################################################
## MODULE GLOBALS AND CONSTANTS
def _genUniqueModuleName(baseModuleName):
"""The calling code is responsible for concurrency locking.
"""
if baseModuleName not in sys.modules:
finalName = baseModuleName
else:
finalName = ('cheetah_%s_%s_%s'%(baseModuleName,
str(time.time()).replace('.', '_'),
str(randrange(10000, 99999))))
return finalName
# Cache of a cgi.FieldStorage() instance, maintained by .webInput().
# This is only relevant to templates used as CGI scripts.
_formUsedByWebInput = None
def updateLinecache(filename, src):
    """Register `src` under `filename` in Python's linecache so that
    tracebacks into dynamically generated template code can show real
    source lines."""
    import linecache
    # linecache entries are (size, mtime, lines, fullname) tuples
    entry = (len(src), time.time(), src.splitlines(), filename)
    linecache.cache[filename] = entry
class CompileCacheItem(object):
    # Plain attribute-bag record; presumably holds entries for the
    # compile cache (_CHEETAH_compileCache) -- TODO confirm usage.
    pass
class TemplatePreprocessor(object):
    '''
    This is used with the preprocessors argument to Template.compile().
    See the docstring for Template.compile
    ** Preprocessors are an advanced topic **
    '''
    def __init__(self, settings):
        # settings: a normalized preprocessor-settings object (see
        # Template._normalizePreprocessorSettings)
        self._settings = settings
    def preprocess(self, source, file):
        """Create an intermediate template and return the source code
        it outputs
        """
        settings = self._settings
        if not source: # @@TR: this needs improving
            if isinstance(file, (str, unicode)): # it's a filename.
                f = open(file)
                source = f.read()
                f.close()
            elif hasattr(file, 'read'):
                source = file.read()
            file = None
        templateAPIClass = settings.templateAPIClass
        # forward only those settings attrs that compile() actually
        # accepts (introspected from its argument list)
        possibleKwArgs = [
            arg for arg in
            inspect.getargs(templateAPIClass.compile.im_func.func_code)[0]
            if arg not in ('klass', 'source', 'file',)]
        compileKwArgs = {}
        for arg in possibleKwArgs:
            if hasattr(settings, arg):
                compileKwArgs[arg] = getattr(settings, arg)
        tmplClass = templateAPIClass.compile(source=source, file=file, **compileKwArgs)
        tmplInstance = tmplClass(**settings.templateInitArgs)
        # render the intermediate template; its output is the new source
        outputSource = settings.outputTransformer(tmplInstance)
        outputFile = None
        return outputSource, outputFile
class Template(Servlet):
'''
This class provides a) methods used by templates at runtime and b)
methods for compiling Cheetah source code into template classes.
This documentation assumes you already know Python and the basics of object
oriented programming. If you don't know Python, see the sections of the
Cheetah Users' Guide for non-programmers. It also assumes you have read
about Cheetah's syntax in the Users' Guide.
The following explains how to use Cheetah from within Python programs or via
the interpreter. If you statically compile your templates on the command
line using the 'cheetah' script, this is not relevant to you. Statically
compiled Cheetah template modules/classes (e.g. myTemplate.py:
MyTemplateClasss) are just like any other Python module or class. Also note,
most Python web frameworks (Webware, Aquarium, mod_python, Turbogears,
CherryPy, Quixote, etc.) provide plugins that handle Cheetah compilation for
you.
There are several possible usage patterns:
1) tclass = Template.compile(src)
t1 = tclass() # or tclass(namespaces=[namespace,...])
t2 = tclass() # or tclass(namespaces=[namespace2,...])
outputStr = str(t1) # or outputStr = t1.aMethodYouDefined()
Template.compile provides a rich and very flexible API via its
optional arguments so there are many possible variations of this
pattern. One example is:
tclass = Template.compile('hello $name from $caller', baseclass=dict)
print tclass(name='world', caller='me')
See the Template.compile() docstring for more details.
2) tmplInstance = Template(src)
# or Template(src, namespaces=[namespace,...])
outputStr = str(tmplInstance) # or outputStr = tmplInstance.aMethodYouDefined(...args...)
Notes on the usage patterns:
usage pattern 1)
This is the most flexible, but it is slightly more verbose unless you
write a wrapper function to hide the plumbing. Under the hood, all
other usage patterns are based on this approach. Templates compiled
this way can #extend (subclass) any Python baseclass: old-style or
new-style (based on object or a builtin type).
usage pattern 2)
This was Cheetah's original usage pattern. It returns an instance,
but you can still access the generated class via
tmplInstance.__class__. If you want to use several different
namespace 'searchLists' with a single template source definition,
you're better off with Template.compile (1).
Limitations (use pattern 1 instead):
- Templates compiled this way can only #extend subclasses of the
new-style 'object' baseclass. Cheetah.Template is a subclass of
'object'. You also can not #extend dict, list, or other builtin
types.
- If your template baseclass' __init__ constructor expects args there
is currently no way to pass them in.
If you need to subclass a dynamically compiled Cheetah class, do something like this:
from Cheetah.Template import Template
T1 = Template.compile('$meth1 #def meth1: this is meth1 in T1')
T2 = Template.compile('#implements meth1\nthis is meth1 redefined in T2', baseclass=T1)
print T1, T1()
print T2, T2()
Note about class and instance attribute names:
Attributes used by Cheetah have a special prefix to avoid confusion with
the attributes of the templates themselves or those of template
baseclasses.
Class attributes which are used in class methods look like this:
klass._CHEETAH_useCompilationCache (_CHEETAH_xxx)
Instance attributes look like this:
klass._CHEETAH__globalSetVars (_CHEETAH__xxx with 2 underscores)
'''
# this is used by ._addCheetahPlumbingCodeToClass()
_CHEETAH_requiredCheetahMethods = (
'_initCheetahInstance',
'searchList',
'errorCatcher',
'getVar',
'varExists',
'getFileContents',
'i18n',
'runAsMainProgram',
'respond',
'shutdown',
'webInput',
'serverSidePath',
'generatedClassCode',
'generatedModuleCode',
'_getCacheStore',
'_getCacheStoreIdPrefix',
'_createCacheRegion',
'getCacheRegion',
'getCacheRegions',
'refreshCache',
'_handleCheetahInclude',
'_getTemplateAPIClassForIncludeDirectiveCompilation',
)
# Classmethods that _addCheetahPlumbingCodeToClass() grafts onto compiled
# template classes that don't already subclass Template.
_CHEETAH_requiredCheetahClassMethods = ('subclass',)
# Class attributes (each stored under a '_CHEETAH_' prefix) that the
# plumbing code copies onto compiled template classes when missing.
_CHEETAH_requiredCheetahClassAttributes = ('cacheRegionClass', 'cacheStore',
                                           'cacheStoreIdPrefix', 'cacheStoreClass')

## the following are used by .compile(). Most are documented in its docstring.
_CHEETAH_cacheModuleFilesForTracebacks = False
_CHEETAH_cacheDirForModuleFiles = None # change to a dirname
_CHEETAH_compileCache = dict() # cache store for compiled code and classes
# To do something other than simple in-memory caching you can create an
# alternative cache store. It just needs to support the basics of Python's
# mapping/dict protocol. E.g.:
#   class AdvCachingTemplate(Template):
#       _CHEETAH_compileCache = MemoryOrFileCache()
_CHEETAH_compileLock = Lock() # used to prevent race conditions
_CHEETAH_defaultMainMethodName = None
_CHEETAH_compilerSettings = None
_CHEETAH_compilerClass = Compiler
# set by .compile() to the compiler instance used, for directive analysis
_CHEETAH_compilerInstance = None
_CHEETAH_cacheCompilationResults = True
_CHEETAH_useCompilationCache = True
_CHEETAH_keepRefToGeneratedCode = True
_CHEETAH_defaultBaseclassForTemplates = None
_CHEETAH_defaultClassNameForTemplates = None
# defaults to DEFAULT_COMPILER_SETTINGS['mainMethodName']:
_CHEETAH_defaultMainMethodNameForTemplates = None
_CHEETAH_defaultModuleNameForTemplates = 'DynamicallyCompiledCheetahTemplate'
_CHEETAH_defaultModuleGlobalsForTemplates = None
_CHEETAH_preprocessors = None
_CHEETAH_defaultPreprocessorClass = TemplatePreprocessor

## The following attributes are used by instance methods:
# generated module source kept for generatedModuleCode()/generatedClassCode()
_CHEETAH_generatedModuleCode = None
# re-bound as a class attribute; presumably so compiled templates can reach
# the exception via self/class lookup — confirm against generated code
NonNumericInputError = NonNumericInputError
_CHEETAH_cacheRegionClass = CacheRegion
_CHEETAH_cacheStoreClass = MemoryCacheStore
#_CHEETAH_cacheStoreClass = MemcachedCacheStore
_CHEETAH_cacheStore = None
_CHEETAH_cacheStoreIdPrefix = None
@classmethod
def _getCompilerClass(klass, source=None, file=None):
return klass._CHEETAH_compilerClass
@classmethod
def _getCompilerSettings(klass, source=None, file=None):
return klass._CHEETAH_compilerSettings
@classmethod
def compile(klass, source=None, file=None,
returnAClass=True,
compilerSettings=Unspecified,
compilerClass=Unspecified,
moduleName=None,
className=Unspecified,
mainMethodName=Unspecified,
baseclass=Unspecified,
moduleGlobals=Unspecified,
cacheCompilationResults=Unspecified,
useCache=Unspecified,
preprocessors=Unspecified,
cacheModuleFilesForTracebacks=Unspecified,
cacheDirForModuleFiles=Unspecified,
commandlineopts=None,
keepRefToGeneratedCode=Unspecified,
):
"""
The core API for compiling Cheetah source code into template classes.
This class method compiles Cheetah source code and returns a python
class. You then create template instances using that class. All
Cheetah's other compilation API's use this method under the hood.
Internally, this method a) parses the Cheetah source code and generates
Python code defining a module with a single class in it, b) dynamically
creates a module object with a unique name, c) execs the generated code
in that module's namespace then inserts the module into sys.modules, and
d) returns a reference to the generated class. If you want to get the
generated python source code instead, pass the argument
returnAClass=False.
It caches generated code and classes. See the descriptions of the
arguments'cacheCompilationResults' and 'useCache' for details. This
doesn't mean that templates will automatically recompile themselves when
the source file changes. Rather, if you call Template.compile(src) or
Template.compile(file=path) repeatedly it will attempt to return a
cached class definition instead of recompiling.
Hooks are provided template source preprocessing. See the notes on the
'preprocessors' arg.
If you are an advanced user and need to customize the way Cheetah parses
source code or outputs Python code, you should check out the
compilerSettings argument.
Arguments:
You must provide either a 'source' or 'file' arg, but not both:
- source (string or None)
- file (string path, file-like object, or None)
The rest of the arguments are strictly optional. All but the first
have defaults in attributes of the Template class which can be
overridden in subclasses of this class. Working with most of these is
an advanced topic.
- returnAClass=True
If false, return the generated module code rather than a class.
- compilerSettings (a dict)
Default: Template._CHEETAH_compilerSettings=None
a dictionary of settings to override those defined in
DEFAULT_COMPILER_SETTINGS. These can also be overridden in your
template source code with the #compiler or #compiler-settings
directives.
- compilerClass (a class)
Default: Template._CHEETAH_compilerClass=Cheetah.Compiler.Compiler
a subclass of Cheetah.Compiler.Compiler. Mucking with this is a
very advanced topic.
- moduleName (a string)
Default:
Template._CHEETAH_defaultModuleNameForTemplates
='DynamicallyCompiledCheetahTemplate'
What to name the generated Python module. If the provided value is
None and a file arg was given, the moduleName is created from the
file path. In all cases if the moduleName provided is already in
sys.modules it is passed through a filter that generates a unique
variant of the name.
- className (a string)
Default: Template._CHEETAH_defaultClassNameForTemplates=None
What to name the generated Python class. If the provided value is
None, the moduleName is use as the class name.
- mainMethodName (a string)
Default:
Template._CHEETAH_defaultMainMethodNameForTemplates
=None (and thus DEFAULT_COMPILER_SETTINGS['mainMethodName'])
What to name the main output generating method in the compiled
template class.
- baseclass (a string or a class)
Default: Template._CHEETAH_defaultBaseclassForTemplates=None
Specifies the baseclass for the template without manually
including an #extends directive in the source. The #extends
directive trumps this arg.
If the provided value is a string you must make sure that a class
reference by that name is available to your template, either by
using an #import directive or by providing it in the arg
'moduleGlobals'.
If the provided value is a class, Cheetah will handle all the
details for you.
- moduleGlobals (a dict)
Default: Template._CHEETAH_defaultModuleGlobalsForTemplates=None
A dict of vars that will be added to the global namespace of the
module the generated code is executed in, prior to the execution
of that code. This should be Python values, not code strings!
- cacheCompilationResults (True/False)
Default: Template._CHEETAH_cacheCompilationResults=True
Tells Cheetah to cache the generated code and classes so that they
can be reused if Template.compile() is called multiple times with
the same source and options.
- useCache (True/False)
Default: Template._CHEETAH_useCompilationCache=True
Should the compilation cache be used? If True and a previous
compilation created a cached template class with the same source
code, compiler settings and other options, the cached template
class will be returned.
- cacheModuleFilesForTracebacks (True/False)
Default: Template._CHEETAH_cacheModuleFilesForTracebacks=False
In earlier versions of Cheetah tracebacks from exceptions that
were raised inside dynamically compiled Cheetah templates were
opaque because Python didn't have access to a python source file
to use in the traceback:
File "xxxx.py", line 192, in getTextiledContent
content = str(template(searchList=searchList))
File "cheetah_yyyy.py", line 202, in __str__
File "cheetah_yyyy.py", line 187, in respond
File "cheetah_yyyy.py", line 139, in writeBody
ZeroDivisionError: integer division or modulo by zero
It is now possible to keep those files in a cache dir and allow
Python to include the actual source lines in tracebacks and makes
them much easier to understand:
File "xxxx.py", line 192, in getTextiledContent
content = str(template(searchList=searchList))
File "/tmp/CheetahCacheDir/cheetah_yyyy.py", line 202, in __str__
def __str__(self): return self.respond()
File "/tmp/CheetahCacheDir/cheetah_yyyy.py", line 187, in respond
self.writeBody(trans=trans)
File "/tmp/CheetahCacheDir/cheetah_yyyy.py", line 139, in writeBody
__v = 0/0 # $(0/0)
ZeroDivisionError: integer division or modulo by zero
- cacheDirForModuleFiles (a string representing a dir path)
Default: Template._CHEETAH_cacheDirForModuleFiles=None
See notes on cacheModuleFilesForTracebacks.
- preprocessors
Default: Template._CHEETAH_preprocessors=None
** THIS IS A VERY ADVANCED TOPIC **
These are used to transform the source code prior to compilation.
They provide a way to use Cheetah as a code generator for Cheetah
code. In other words, you use one Cheetah template to output the
source code for another Cheetah template.
The major expected use cases are:
a) 'compile-time caching' aka 'partial template binding',
wherein an intermediate Cheetah template is used to output
the source for the final Cheetah template. The intermediate
template is a mix of a modified Cheetah syntax (the
'preprocess syntax') and standard Cheetah syntax. The
preprocessor syntax is executed at compile time and outputs
Cheetah code which is then compiled in turn. This approach
allows one to completely soft-code all the elements in the
template which are subject to change yet have it compile to
extremely efficient Python code with everything but the
elements that must be variable at runtime (per browser
request, etc.) compiled as static strings. Examples of this
usage pattern will be added to the Cheetah Users' Guide.
The'preprocess syntax' is just Cheetah's standard one with
alternatives for the $ and # tokens:
e.g. '@' and '%' for code like this
@aPreprocessVar $aRuntimeVar
%if aCompileTimeCondition then yyy else zzz
%% preprocessor comment
#if aRunTimeCondition then aaa else bbb
## normal comment
$aRuntimeVar
b) adding #import and #extends directives dynamically based on
the source
If preprocessors are provided, Cheetah pipes the source code
through each one in the order provided. Each preprocessor should
accept the args (source, file) and should return a tuple (source,
file).
The argument value should be a list, but a single non-list value
is acceptable and will automatically be converted into a list.
Each item in the list will be passed through
Template._normalizePreprocessor(). The items should either match
one of the following forms:
- an object with a .preprocess(source, file) method
- a callable with the following signature:
source, file = f(source, file)
or one of the forms below:
- a single string denoting the 2 'tokens' for the preprocess
syntax. The tokens should be in the order (placeholderToken,
directiveToken) and should separated with a space:
e.g. '@ %'
klass = Template.compile(src, preprocessors='@ %')
# or
klass = Template.compile(src, preprocessors=['@ %'])
- a dict with the following keys or an object with the
following attributes (all are optional, but nothing will
happen if you don't provide at least one):
- tokens: same as the single string described above. You can
also provide a tuple of 2 strings.
- searchList: the searchList used for preprocess $placeholders
- compilerSettings: used in the compilation of the intermediate
template
- templateAPIClass: an optional subclass of `Template`
- outputTransformer: a simple hook for passing in a callable
which can do further transformations of the preprocessor
output, or do something else like debug logging. The
default is str().
+ any keyword arguments to Template.compile which you want to
provide for the compilation of the intermediate template.
klass = Template.compile(src,
preprocessors=[ dict(tokens='@ %', searchList=[...]) ] )
"""
errmsg = "arg '%s' must be %s"
if not isinstance(source, (types.NoneType, basestring)):
raise TypeError(errmsg % ('source', 'string or None'))
if not isinstance(file, (types.NoneType, basestring, types.FileType)):
raise TypeError(errmsg %
('file', 'string, file-like object, or None'))
if baseclass is Unspecified:
baseclass = klass._CHEETAH_defaultBaseclassForTemplates
if isinstance(baseclass, Template):
baseclass = baseclass.__class__
if not isinstance(baseclass, (types.NoneType, basestring, types.ClassType, types.TypeType)):
raise TypeError(errmsg % ('baseclass', 'string, class or None'))
if cacheCompilationResults is Unspecified:
cacheCompilationResults = klass._CHEETAH_cacheCompilationResults
if not isinstance(cacheCompilationResults, (int, bool)):
raise TypeError(errmsg % ('cacheCompilationResults', 'boolean'))
if useCache is Unspecified:
useCache = klass._CHEETAH_useCompilationCache
if not isinstance(useCache, (int, bool)):
raise TypeError(errmsg % ('useCache', 'boolean'))
if compilerSettings is Unspecified:
compilerSettings = klass._getCompilerSettings(source, file) or {}
if not isinstance(compilerSettings, dict):
raise TypeError(errmsg % ('compilerSettings', 'dictionary'))
if compilerClass is Unspecified:
compilerClass = klass._getCompilerClass(source, file)
if preprocessors is Unspecified:
preprocessors = klass._CHEETAH_preprocessors
if keepRefToGeneratedCode is Unspecified:
keepRefToGeneratedCode = klass._CHEETAH_keepRefToGeneratedCode
if not isinstance(keepRefToGeneratedCode, (int, bool)):
raise TypeError(errmsg % ('keepReftoGeneratedCode', 'boolean'))
if not isinstance(moduleName, (types.NoneType, basestring)):
raise TypeError(errmsg % ('moduleName', 'string or None'))
__orig_file__ = None
if not moduleName:
if file and isinstance(file, basestring):
moduleName = convertTmplPathToModuleName(file)
__orig_file__ = file
else:
moduleName = klass._CHEETAH_defaultModuleNameForTemplates
if className is Unspecified:
className = klass._CHEETAH_defaultClassNameForTemplates
if not isinstance(className, (types.NoneType, basestring)):
raise TypeError(errmsg % ('className', 'string or None'))
className = re.sub(r'^_+','', className or moduleName)
if mainMethodName is Unspecified:
mainMethodName = klass._CHEETAH_defaultMainMethodNameForTemplates
if not isinstance(mainMethodName, (types.NoneType, basestring)):
raise TypeError(errmsg % ('mainMethodName', 'string or None'))
if moduleGlobals is Unspecified:
moduleGlobals = klass._CHEETAH_defaultModuleGlobalsForTemplates
if cacheModuleFilesForTracebacks is Unspecified:
cacheModuleFilesForTracebacks = klass._CHEETAH_cacheModuleFilesForTracebacks
if not isinstance(cacheModuleFilesForTracebacks, (int, bool)):
raise TypeError(errmsg %
('cacheModuleFilesForTracebacks', 'boolean'))
if cacheDirForModuleFiles is Unspecified:
cacheDirForModuleFiles = klass._CHEETAH_cacheDirForModuleFiles
if not isinstance(cacheDirForModuleFiles, (types.NoneType, basestring)):
raise TypeError(errmsg %
('cacheDirForModuleFiles', 'string or None'))
##################################################
## handle any preprocessors
if preprocessors:
origSrc = source
source, file = klass._preprocessSource(source, file, preprocessors)
##################################################
## compilation, using cache if requested/possible
baseclassValue = None
baseclassName = None
if baseclass:
if isinstance(baseclass, basestring):
baseclassName = baseclass
elif isinstance(baseclass, (types.ClassType, types.TypeType)):
# @@TR: should soft-code this
baseclassName = 'CHEETAH_dynamicallyAssignedBaseClass_'+baseclass.__name__
baseclassValue = baseclass
cacheHash = None
cacheItem = None
if source or isinstance(file, basestring):
compilerSettingsHash = None
if compilerSettings:
compilerSettingsHash = hashDict(compilerSettings)
moduleGlobalsHash = None
if moduleGlobals:
moduleGlobalsHash = hashDict(moduleGlobals)
fileHash = None
if file:
fileHash = str(hash(file))
if globals()['__checkFileMtime']:
fileHash += str(os.path.getmtime(file))
try:
# @@TR: find some way to create a cacheHash that is consistent
# between process restarts. It would allow for caching the
# compiled module on disk and thereby reduce the startup time
# for applications that use a lot of dynamically compiled
# templates.
cacheHash = ''.join([str(v) for v in
[hash(source),
fileHash,
className,
moduleName,
mainMethodName,
hash(compilerClass),
hash(baseclass),
compilerSettingsHash,
moduleGlobalsHash,
hash(cacheDirForModuleFiles),
]])
except:
#@@TR: should add some logging to this
pass
outputEncoding = 'ascii'
compiler = None
if useCache and cacheHash and cacheHash in klass._CHEETAH_compileCache:
cacheItem = klass._CHEETAH_compileCache[cacheHash]
generatedModuleCode = cacheItem.code
else:
compiler = compilerClass(source, file,
moduleName=moduleName,
mainClassName=className,
baseclassName=baseclassName,
mainMethodName=mainMethodName,
settings=(compilerSettings or {}))
if commandlineopts:
compiler.setShBang(commandlineopts.shbang)
compiler.compile()
generatedModuleCode = compiler.getModuleCode()
outputEncoding = compiler.getModuleEncoding()
if not returnAClass:
# This is a bit of a hackish solution to make sure we're setting the proper
# encoding on generated code that is destined to be written to a file
if not outputEncoding == 'ascii':
generatedModuleCode = generatedModuleCode.split('\n')
generatedModuleCode.insert(1, '# -*- coding: %s -*-' % outputEncoding)
generatedModuleCode = '\n'.join(generatedModuleCode)
return generatedModuleCode.encode(outputEncoding)
else:
if cacheItem:
cacheItem.lastCheckoutTime = time.time()
return cacheItem.klass
try:
klass._CHEETAH_compileLock.acquire()
uniqueModuleName = _genUniqueModuleName(moduleName)
__file__ = uniqueModuleName+'.py' # relative file path with no dir part
if cacheModuleFilesForTracebacks:
if not os.path.exists(cacheDirForModuleFiles):
raise Exception('%s does not exist'%cacheDirForModuleFiles)
__file__ = os.path.join(cacheDirForModuleFiles, __file__)
# @@TR: might want to assert that it doesn't already exist
open(__file__, 'w').write(generatedModuleCode)
# @@TR: should probably restrict the perms, etc.
mod = new.module(str(uniqueModuleName))
if moduleGlobals:
for k, v in moduleGlobals.items():
setattr(mod, k, v)
mod.__file__ = __file__
if __orig_file__ and os.path.exists(__orig_file__):
# this is used in the WebKit filemonitoring code
mod.__orig_file__ = __orig_file__
if baseclass and baseclassValue:
setattr(mod, baseclassName, baseclassValue)
##
try:
co = compile(generatedModuleCode, __file__, 'exec')
exec(co, mod.__dict__)
except SyntaxError, e:
try:
parseError = genParserErrorFromPythonException(
source, file, generatedModuleCode, exception=e)
except:
updateLinecache(__file__, generatedModuleCode)
e.generatedModuleCode = generatedModuleCode
raise e
else:
raise parseError
except Exception, e:
updateLinecache(__file__, generatedModuleCode)
e.generatedModuleCode = generatedModuleCode
raise
##
sys.modules[uniqueModuleName] = mod
finally:
klass._CHEETAH_compileLock.release()
templateClass = getattr(mod, className)
if (cacheCompilationResults
and cacheHash
and cacheHash not in klass._CHEETAH_compileCache):
cacheItem = CompileCacheItem()
cacheItem.cacheTime = cacheItem.lastCheckoutTime = time.time()
cacheItem.code = generatedModuleCode
cacheItem.klass = templateClass
templateClass._CHEETAH_isInCompilationCache = True
klass._CHEETAH_compileCache[cacheHash] = cacheItem
else:
templateClass._CHEETAH_isInCompilationCache = False
if keepRefToGeneratedCode or cacheCompilationResults:
templateClass._CHEETAH_generatedModuleCode = generatedModuleCode
# If we have a compiler object, let's set it to the compiler class
# to help the directive analyzer code
if compiler:
templateClass._CHEETAH_compilerInstance = compiler
return templateClass
@classmethod
def subclass(klass, *args, **kws):
    """Compile a template that extends the template class this is called on.

    Accepts the same args as the .compile() classmethod; the 'baseclass'
    arg is forced to this class.

      T1 = Template.compile(' foo - $meth1 - bar\n#def meth1: this is T1.meth1')
      T2 = T1.subclass('#implements meth1\n this is T2.meth1')
    """
    # NOTE(review): isinstance(klass, Template) tests whether the *class
    # object* itself is a Template instance, which is normally False;
    # verify whether issubclass() was intended.
    templateAPIClass = Template
    if isinstance(klass, Template):
        templateAPIClass = klass
    kws['baseclass'] = klass
    return templateAPIClass.compile(*args, **kws)
@classmethod
def _preprocessSource(klass, source, file, preprocessors):
"""Iterates through the .compile() classmethod's preprocessors argument
and pipes the source code through each each preprocessor.
It returns the tuple (source, file) which is then used by
Template.compile to finish the compilation.
"""
if not isinstance(preprocessors, (list, tuple)):
preprocessors = [preprocessors]
for preprocessor in preprocessors:
preprocessor = klass._normalizePreprocessorArg(preprocessor)
source, file = preprocessor.preprocess(source, file)
return source, file
@classmethod
def _normalizePreprocessorArg(klass, arg):
"""Used to convert the items in the .compile() classmethod's
preprocessors argument into real source preprocessors. This permits the
use of several shortcut forms for defining preprocessors.
"""
if hasattr(arg, 'preprocess'):
return arg
elif hasattr(arg, '__call__'):
class WrapperPreprocessor:
def preprocess(self, source, file):
return arg(source, file)
return WrapperPreprocessor()
else:
class Settings(object):
placeholderToken = None
directiveToken = None
settings = Settings()
if isinstance(arg, str) or isinstance(arg, (list, tuple)):
settings.tokens = arg
elif isinstance(arg, dict):
for k, v in arg.items():
setattr(settings, k, v)
else:
settings = arg
settings = klass._normalizePreprocessorSettings(settings)
return klass._CHEETAH_defaultPreprocessorClass(settings)
@classmethod
def _normalizePreprocessorSettings(klass, settings):
settings.keepRefToGeneratedCode = True
def normalizeSearchList(searchList):
if not isinstance(searchList, (list, tuple)):
searchList = [searchList]
return searchList
def normalizeTokens(tokens):
if isinstance(tokens, str):
return tokens.split() # space delimited string e.g.'@ %'
elif isinstance(tokens, (list, tuple)):
return tokens
else:
raise PreprocessError('invalid tokens argument: %r'%tokens)
if hasattr(settings, 'tokens'):
(settings.placeholderToken,
settings.directiveToken) = normalizeTokens(settings.tokens)
if (not getattr(settings, 'compilerSettings', None)
and not getattr(settings, 'placeholderToken', None) ):
raise TypeError(
'Preprocessor requires either a "tokens" or a "compilerSettings" arg.'
' Neither was provided.')
if not hasattr(settings, 'templateInitArgs'):
settings.templateInitArgs = {}
if 'searchList' not in settings.templateInitArgs:
if not hasattr(settings, 'searchList') and hasattr(settings, 'namespaces'):
settings.searchList = settings.namespaces
elif not hasattr(settings, 'searchList'):
settings.searchList = []
settings.templateInitArgs['searchList'] = settings.searchList
settings.templateInitArgs['searchList'] = (
normalizeSearchList(settings.templateInitArgs['searchList']))
if not hasattr(settings, 'outputTransformer'):
settings.outputTransformer = unicode
if not hasattr(settings, 'templateAPIClass'):
class PreprocessTemplateAPIClass(klass): pass
settings.templateAPIClass = PreprocessTemplateAPIClass
if not hasattr(settings, 'compilerSettings'):
settings.compilerSettings = {}
klass._updateSettingsWithPreprocessTokens(
compilerSettings=settings.compilerSettings,
placeholderToken=settings.placeholderToken,
directiveToken=settings.directiveToken
)
return settings
@classmethod
def _updateSettingsWithPreprocessTokens(
klass, compilerSettings, placeholderToken, directiveToken):
if (placeholderToken and 'cheetahVarStartToken' not in compilerSettings):
compilerSettings['cheetahVarStartToken'] = placeholderToken
if directiveToken:
if 'directiveStartToken' not in compilerSettings:
compilerSettings['directiveStartToken'] = directiveToken
if 'directiveEndToken' not in compilerSettings:
compilerSettings['directiveEndToken'] = directiveToken
if 'commentStartToken' not in compilerSettings:
compilerSettings['commentStartToken'] = directiveToken*2
if 'multiLineCommentStartToken' not in compilerSettings:
compilerSettings['multiLineCommentStartToken'] = (
directiveToken+'*')
if 'multiLineCommentEndToken' not in compilerSettings:
compilerSettings['multiLineCommentEndToken'] = (
'*'+directiveToken)
if 'EOLSlurpToken' not in compilerSettings:
compilerSettings['EOLSlurpToken'] = directiveToken
@classmethod
def _addCheetahPlumbingCodeToClass(klass, concreteTemplateClass):
    """If concreteTemplateClass is not a subclass of Cheetah.Template, add
    the required cheetah methods and attributes to it.

    This is called on each new template class after it has been compiled.
    If concreteTemplateClass is not a subclass of Cheetah.Template but
    already has a method with the same name as one of the required cheetah
    methods, this will skip that method.
    """
    # graft any missing required instance methods from Template onto the
    # class (py2 new.instancemethod creates an unbound method for it)
    for methodname in klass._CHEETAH_requiredCheetahMethods:
        if not hasattr(concreteTemplateClass, methodname):
            method = getattr(Template, methodname)
            newMethod = new.instancemethod(method.im_func, None, concreteTemplateClass)
            #print methodname, method
            setattr(concreteTemplateClass, methodname, newMethod)

    # likewise for the required classmethods ...
    for classMethName in klass._CHEETAH_requiredCheetahClassMethods:
        if not hasattr(concreteTemplateClass, classMethName):
            meth = getattr(klass, classMethName)
            setattr(concreteTemplateClass, classMethName, classmethod(meth.im_func))

    # ... and the required class attributes (stored under a '_CHEETAH_'
    # prefix)
    for attrname in klass._CHEETAH_requiredCheetahClassAttributes:
        attrname = '_CHEETAH_'+attrname
        if not hasattr(concreteTemplateClass, attrname):
            attrVal = getattr(klass, attrname)
            setattr(concreteTemplateClass, attrname, attrVal)

    # install __str__/__unicode__ unless the class already provides its
    # own (i.e. something other than the default object.__str__)
    if (not hasattr(concreteTemplateClass, '__str__')
        or concreteTemplateClass.__str__ is object.__str__):

        mainMethNameAttr = '_mainCheetahMethod_for_'+concreteTemplateClass.__name__
        mainMethName = getattr(concreteTemplateClass, mainMethNameAttr, None)
        if mainMethName:
            # the compiled template recorded its main method's name;
            # py2 __str__ must return bytes, hence the utf-8 encode
            def __str__(self):
                rc = getattr(self, mainMethName)()
                if isinstance(rc, unicode):
                    return rc.encode('utf-8')
                return rc
            def __unicode__(self):
                return getattr(self, mainMethName)()
        elif (hasattr(concreteTemplateClass, 'respond')
              and concreteTemplateClass.respond!=Servlet.respond):
            # fall back to a respond() that overrides Servlet's default
            def __str__(self):
                rc = self.respond()
                if isinstance(rc, unicode):
                    return rc.encode('utf-8')
                return rc
            def __unicode__(self):
                return self.respond()
        else:
            # last resort: decide at call time which output method exists
            def __str__(self):
                rc = None
                if hasattr(self, mainMethNameAttr):
                    rc = getattr(self, mainMethNameAttr)()
                elif hasattr(self, 'respond'):
                    rc = self.respond()
                else:
                    rc = super(self.__class__, self).__str__()
                if isinstance(rc, unicode):
                    return rc.encode('utf-8')
                return rc
            def __unicode__(self):
                if hasattr(self, mainMethNameAttr):
                    return getattr(self, mainMethNameAttr)()
                elif hasattr(self, 'respond'):
                    return self.respond()
                else:
                    return super(self.__class__, self).__unicode__()
        # bind the chosen implementations onto the class as unbound methods
        __str__ = new.instancemethod(__str__, None, concreteTemplateClass)
        __unicode__ = new.instancemethod(__unicode__, None, concreteTemplateClass)
        setattr(concreteTemplateClass, '__str__', __str__)
        setattr(concreteTemplateClass, '__unicode__', __unicode__)
def __init__(self, source=None,

             namespaces=None, searchList=None,
             # use either or. They are aliases for the same thing.

             file=None,
             filter='RawOrEncodedUnicode', # which filter from Cheetah.Filters
             filtersLib=Filters,
             errorCatcher=None,

             compilerSettings=Unspecified, # control the behaviour of the compiler
             _globalSetVars=None, # used internally for #include'd templates
             _preBuiltSearchList=None # used internally for #include'd templates
             ):
    """a) compiles a new template OR b) instantiates an existing template.

    Read this docstring carefully as there are two distinct usage patterns.
    You should also read this class' main docstring.

    a) to compile a new template:
         t = Template(source=aSourceString)
             # or
         t = Template(file='some/path')
             # or
         t = Template(file=someFileObject)
             # or
         namespaces = [{'foo':'bar'}]
         t = Template(source=aSourceString, namespaces=namespaces)
             # or
         t = Template(file='some/path', namespaces=namespaces)

         print t

    b) to create an instance of an existing, precompiled template class:
       ## i) first you need a reference to a compiled template class:
         tclass = Template.compile(source=src) # or just Template.compile(src)
             # or
         tclass = Template.compile(file='some/path')
             # or
         tclass = Template.compile(file=someFileObject)
             # or
         # if you used the command line compiler or have Cheetah's ImportHooks
         # installed your template class is also available via Python's
         # standard import mechanism:
         from ACompileTemplate import AcompiledTemplate as tclass

       ## ii) then you create an instance
         t = tclass(namespaces=namespaces)
             # or
         t = tclass(namespaces=namespaces, filter='RawOrEncodedUnicode')
         print t

    Arguments:
      for usage pattern a)
        If you are compiling a new template, you must provide either a
        'source' or 'file' arg, but not both:
          - source (string or None)
          - file (string path, file-like object, or None)

        Optional args (see below for more) :
          - compilerSettings
           Default: Template._CHEETAH_compilerSettings=None

           a dictionary of settings to override those defined in
           DEFAULT_COMPILER_SETTINGS. See
           Cheetah.Template.DEFAULT_COMPILER_SETTINGS and the Users' Guide
           for details.

        You can pass the source arg in as a positional arg with this usage
        pattern. Use keywords for all other args.

      for usage pattern b)
        Do not use positional args with this usage pattern, unless your
        template subclasses something other than Cheetah.Template and you
        want to pass positional args to that baseclass. E.g.:
          dictTemplate = Template.compile('hello $name from $caller', baseclass=dict)
          tmplvars = dict(name='world', caller='me')
          print dictTemplate(tmplvars)
        This usage requires all Cheetah args to be passed in as keyword args.

      optional args for both usage patterns:

        - namespaces (aka 'searchList')
          Default: None

          an optional list of namespaces (dictionaries, objects, modules,
          etc.) which Cheetah will search through to find the variables
          referenced in $placeholders.

          If you provide a single namespace instead of a list, Cheetah will
          automatically convert it into a list.

          NOTE: Cheetah does NOT force you to use the namespaces search list
          and related features. It's on by default, but you can turn it off
          using the compiler settings useSearchList=False or
          useNameMapper=False.

        - filter
          Default: 'RawOrEncodedUnicode'

          Which filter should be used for output filtering. This should
          either be a string which is the name of a filter in the
          'filtersLib' or a subclass of Cheetah.Filters.Filter. See the
          Users' Guide for more details.

        - filtersLib
          Default: Cheetah.Filters

          A module containing subclasses of Cheetah.Filters.Filter. See the
          Users' Guide for more details.

        - errorCatcher
          Default: None

          This is a debugging tool. See the Users' Guide for more details.
          Do not use this or the #errorCatcher directive with live
          production systems.

      Do NOT mess with the args _globalSetVars or _preBuiltSearchList!
    """
    ##################################################
    ## Verify argument keywords and types
    errmsg = "arg '%s' must be %s"
    errmsgextra = errmsg + "\n%s"

    if not isinstance(source, (types.NoneType, basestring)):
        raise TypeError(errmsg % ('source', 'string or None'))

    # BUGFIX: this check previously re-tested 'source' (already validated
    # above with a subset of these types, so it could never fire) and the
    # 'file' arg went completely unvalidated.  It now checks 'file'; the
    # hasattr(read) clause keeps accepting file-like objects (e.g.
    # StringIO) that are not types.FileType.
    if not isinstance(file, (types.NoneType, basestring, types.FileType)) \
       and not hasattr(file, 'read'):
        raise TypeError(errmsg %
                        ('file', 'string, file open for reading, or None'))

    if not isinstance(filter, (basestring, types.TypeType)) and not \
            (isinstance(filter, types.ClassType) and issubclass(filter, Filters.Filter)):
        raise TypeError(errmsgextra %
                        ('filter', 'string or class',
                         '(if class, must be subclass of Cheetah.Filters.Filter)'))
    if not isinstance(filtersLib, (basestring, types.ModuleType)):
        raise TypeError(errmsgextra %
                        ('filtersLib', 'string or module',
                         '(if module, must contain subclasses of Cheetah.Filters.Filter)'))

    if errorCatcher is not None:
        err = True
        if isinstance(errorCatcher, (basestring, types.TypeType)):
            err = False
        if isinstance(errorCatcher, types.ClassType) and \
                issubclass(errorCatcher, ErrorCatchers.ErrorCatcher):
            err = False
        if err:
            raise TypeError(errmsgextra %
                            ('errorCatcher', 'string, class or None',
                             '(if class, must be subclass of Cheetah.ErrorCatchers.ErrorCatcher)'))
    if compilerSettings is not Unspecified:
        if not isinstance(compilerSettings, types.DictType):
            raise TypeError(errmsg %
                            ('compilerSettings', 'dictionary'))

    if source is not None and file is not None:
        raise TypeError("you must supply either a source string or the" +
                        " 'file' keyword argument, but not both")

    ##################################################
    ## Do superclass initialization.
    super(Template, self).__init__()

    ##################################################
    ## Do required version check
    if not hasattr(self, '_CHEETAH_versionTuple'):
        try:
            mod = sys.modules[self.__class__.__module__]
            compiledVersion = mod.__CHEETAH_version__
            compiledVersionTuple = convertVersionStringToTuple(compiledVersion)
            if compiledVersionTuple < MinCompatibleVersionTuple:
                raise AssertionError(
                    'This template was compiled with Cheetah version'
                    ' %s. Templates compiled before version %s must be recompiled.'%(
                        compiledVersion, MinCompatibleVersion))
        except AssertionError:
            raise
        except:
            # deliberate best-effort: templates compiled without version
            # metadata are allowed through
            pass

    ##################################################
    ## Setup instance state attributes used during the life of template
    ## post-compile

    # warn about searchList keys that would be shadowed by Template's own
    # attributes in NameMapper lookups
    if searchList:
        for namespace in searchList:
            if isinstance(namespace, dict):
                intersection = self.Reserved_SearchList & set(namespace.keys())
                warn = False
                if intersection:
                    warn = True
                if isinstance(compilerSettings, dict) and compilerSettings.get('prioritizeSearchListOverSelf'):
                    warn = False
                if warn:
                    logging.info(''' The following keys are members of the Template class and will result in NameMapper collisions! ''')
                    logging.info('''  > %s ''' % ', '.join(list(intersection)))
                    logging.info(''' Please change the key's name or use the compiler setting "prioritizeSearchListOverSelf=True" to prevent the NameMapper from using ''')
                    logging.info(''' the Template member in place of your searchList variable ''')

    self._initCheetahInstance(
        searchList=searchList, namespaces=namespaces,
        filter=filter, filtersLib=filtersLib,
        errorCatcher=errorCatcher,
        _globalSetVars=_globalSetVars,
        compilerSettings=compilerSettings,
        _preBuiltSearchList=_preBuiltSearchList)

    ##################################################
    ## Now, compile if we're meant to
    if (source is not None) or (file is not None):
        self._compile(source, file, compilerSettings=compilerSettings)
def generatedModuleCode(self):
    """Module source code produced by the compiler, or None if this
    instance was never compiled."""
    code = self._CHEETAH_generatedModuleCode
    return code
def generatedClassCode(self):
    """Class-definition slice of the generated module code.

    Extracts everything from the first ``\\nclass `` marker up to the
    ``\\n## END CLASS DEFINITION`` marker the compiler emits.
    """
    moduleCode = self._CHEETAH_generatedModuleCode
    classStart = moduleCode.find('\nclass ')
    classEnd = moduleCode.find('\n## END CLASS DEFINITION')
    return moduleCode[classStart:classEnd]
def searchList(self):
    """Expose the live searchList (not a copy) to callers."""
    return self._CHEETAH__searchList
def errorCatcher(self):
    """Expose the currently active errorCatcher (may be None)."""
    return self._CHEETAH__errorCatcher
## cache methods ##
def _getCacheStore(self):
if not self._CHEETAH__cacheStore:
if self._CHEETAH_cacheStore is not None:
self._CHEETAH__cacheStore = self._CHEETAH_cacheStore
else:
# @@TR: might want to provide a way to provide init args
self._CHEETAH__cacheStore = self._CHEETAH_cacheStoreClass()
return self._CHEETAH__cacheStore
def _getCacheStoreIdPrefix(self):
if self._CHEETAH_cacheStoreIdPrefix is not None:
return self._CHEETAH_cacheStoreIdPrefix
else:
return str(id(self))
def _createCacheRegion(self, regionID):
return self._CHEETAH_cacheRegionClass(
regionID=regionID,
templateCacheIdPrefix=self._getCacheStoreIdPrefix(),
cacheStore=self._getCacheStore())
def getCacheRegion(self, regionID, cacheInfo=None, create=True):
    """Look up a cache region by id, creating it on demand when *create*
    is true.  (*cacheInfo* is accepted for interface compatibility but
    unused here.)"""
    region = self._CHEETAH__cacheRegions.get(regionID)
    if region or not create:
        return region
    region = self._createCacheRegion(regionID)
    self._CHEETAH__cacheRegions[regionID] = region
    return region
def getCacheRegions(self):
    """Return a snapshot dict of this template's 'cache regions'.

    Each ``#cache`` directive block or ``$*cachedPlaceholder`` is a
    separate region.  A copy is returned so callers cannot mutate the
    internal mapping.
    """
    return dict(self._CHEETAH__cacheRegions)
def refreshCache(self, cacheRegionId=None, cacheItemId=None):
    """Refresh a cache region or a specific cache item within a region.

    With no arguments every region is cleared; with only *cacheRegionId*
    that whole region is cleared; adding *cacheItemId* clears just that
    one cached item.  Unknown region ids are silently ignored.
    """
    if not cacheRegionId:
        # BUG FIX: the original iterated the dict directly
        # (``for key, cregion in self.getCacheRegions():``), which yields
        # bare keys and raised ValueError on unpacking.  Iterate .items().
        for key, cregion in self.getCacheRegions().items():
            cregion.clear()
    else:
        cregion = self._CHEETAH__cacheRegions.get(cacheRegionId)
        if not cregion:
            return
        if not cacheItemId:  # clear the desired region and all its cacheItems
            cregion.clear()
        else:  # clear one specific cache of a specific region
            cache = cregion.getCacheItem(cacheItemId)
            if cache:
                cache.clear()
## end cache methods ##
def shutdown(self):
    """Break reference cycles before discarding a servlet.

    Superclass cleanup is best-effort; afterwards all instance state is
    dropped so the garbage collector can reclaim the template.
    """
    try:
        Servlet.shutdown(self)
    except Exception:
        # Was a bare ``except:`` -- narrowed so KeyboardInterrupt /
        # SystemExit still propagate; Servlet cleanup stays best-effort.
        pass
    self._CHEETAH__searchList = None
    self.__dict__ = {}
## utility functions ##
def getVar(self, varName, default=Unspecified, autoCall=True):
    """Resolve *varName* against the searchList.

    Leading '$' characters are stripped first.  Returns *default* when the
    name is missing and a default was supplied; otherwise the
    NameMapper.NotFound propagates to the caller.
    """
    cleaned = varName.replace('$', '')
    try:
        return valueFromSearchList(self.searchList(), cleaned, autoCall)
    except NotFound:
        if default is Unspecified:
            raise
        return default
def varExists(self, varName, autoCall=True):
    """Report whether *varName* (minus any '$' chars) resolves via the
    searchList."""
    try:
        valueFromSearchList(self.searchList(), varName.replace('$', ''), autoCall)
    except NotFound:
        return False
    return True

hasVar = varExists  # legacy alias kept for backwards compatibility
def i18n(self, message,
         plural=None,
         n=None,
         id=None,
         domain=None,
         source=None,
         target=None,
         comment=None
         ):
    """Translation hook -- currently a pass-through stub.

    plural  = the plural form of the message
    n       = a sized argument to distinguish between single and plural forms
    id      = msgid in the translation catalog
    domain  = translation domain
    source  = source lang
    target  = a specific target lang
    comment = a comment to the translation team

    See the following for some ideas:
    http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport

    There is no need to replicate the i18n:name attribute from plone / PTL,
    as cheetah placeholders serve the same purpose.
    """
    return message
def getFileContents(self, path):
    """A hook for getting the contents of a file.

    The default implementation reads a local file with open(); subclasses
    may reimplement it to fetch remote resources, as PHP does with its
    'URL fopen wrapper'.  Using ``with`` guarantees the handle is closed
    even when read() raises (the old open/read/close sequence leaked the
    handle on error).
    """
    with open(path, 'r') as fp:
        return fp.read()
def runAsMainProgram(self):
    """Run this template as a standalone command-line program for static
    page generation ('python yourtemplate.py --help' lists the options)."""
    from TemplateCmdLineIface import CmdLineIface
    iface = CmdLineIface(templateObj=self)
    iface.run()
##################################################
## internal methods -- not to be called by end-users
def _initCheetahInstance(self,
                         searchList=None,
                         namespaces=None,
                         filter='RawOrEncodedUnicode', # which filter from Cheetah.Filters
                         filtersLib=Filters,
                         errorCatcher=None,
                         _globalSetVars=None,
                         compilerSettings=None,
                         _preBuiltSearchList=None):
    """Sets up the instance attributes that cheetah templates use at
    run-time.

    This is automatically called by the __init__ method of compiled
    templates.

    Note that the names of instance attributes used by Cheetah are prefixed
    with '_CHEETAH__' (2 underscores), where class attributes are prefixed
    with '_CHEETAH_' (1 underscore).
    """
    # idempotent: a second call on an already-initialized instance is a no-op
    if getattr(self, '_CHEETAH__instanceInitialized', False):
        return
    # 'namespaces' is an alias for 'searchList'; they are mutually exclusive
    if namespaces is not None:
        assert searchList is None, (
            'Provide "namespaces" or "searchList", not both!')
        searchList = namespaces
    # a single namespace object is wrapped so the searchList is always a list
    if searchList is not None and not isinstance(searchList, (list, tuple)):
        searchList = [searchList]
    self._CHEETAH__globalSetVars = {}
    if _globalSetVars is not None:
        # this is intended to be used internally by Nested Templates in #include's
        self._CHEETAH__globalSetVars = _globalSetVars
    if _preBuiltSearchList is not None:
        # happens with nested Template obj creation from #include's
        self._CHEETAH__searchList = list(_preBuiltSearchList)
        self._CHEETAH__searchList.append(self)
    else:
        # create our own searchList; the template instance itself is always
        # a member so placeholders can resolve its attributes/methods
        self._CHEETAH__searchList = [self._CHEETAH__globalSetVars, self]
        if searchList is not None:
            # 'prioritizeSearchListOverSelf' puts user namespaces ahead of
            # the instance, avoiding collisions with Template's own members
            if isinstance(compilerSettings, dict) and compilerSettings.get('prioritizeSearchListOverSelf'):
                self._CHEETAH__searchList = searchList + self._CHEETAH__searchList
            else:
                self._CHEETAH__searchList.extend(list(searchList))
    self._CHEETAH__cheetahIncludes = {}
    self._CHEETAH__cacheRegions = {}
    self._CHEETAH__indenter = Indenter()
    # @@TR: consider allowing simple callables as the filter argument
    self._CHEETAH__filtersLib = filtersLib
    self._CHEETAH__filters = {}
    # 'filter' may be a name looked up in filtersLib, or a filter class
    # NOTE: 'basestring' makes this branch Python 2 only
    if isinstance(filter, basestring):
        filterName = filter
        klass = getattr(self._CHEETAH__filtersLib, filterName)
    else:
        klass = filter
        filterName = klass.__name__
    # the bound .filter method of a filter instance is what gets called
    self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName] = klass(self).filter
    self._CHEETAH__initialFilter = self._CHEETAH__currentFilter
    self._CHEETAH__errorCatchers = {}
    if errorCatcher:
        # errorCatcher may be a name in the ErrorCatchers module or a class
        # NOTE(review): if it is neither, errorCatcherClass is unbound and
        # the next line raises UnboundLocalError -- no explicit else here
        if isinstance(errorCatcher, basestring):
            errorCatcherClass = getattr(ErrorCatchers, errorCatcher)
        elif isinstance(errorCatcher, ClassType):
            errorCatcherClass = errorCatcher
        self._CHEETAH__errorCatcher = ec = errorCatcherClass(self)
        self._CHEETAH__errorCatchers[errorCatcher.__class__.__name__] = ec
    else:
        self._CHEETAH__errorCatcher = None
    self._CHEETAH__initErrorCatcher = self._CHEETAH__errorCatcher
    if not hasattr(self, 'transaction'):
        self.transaction = None
    self._CHEETAH__instanceInitialized = True
    self._CHEETAH__isBuffering = False
    self._CHEETAH__isControlledByWebKit = False
    self._CHEETAH__cacheStore = None
    if self._CHEETAH_cacheStore is not None:
        self._CHEETAH__cacheStore = self._CHEETAH_cacheStore
def _compile(self, source=None, file=None, compilerSettings=Unspecified,
             moduleName=None, mainMethodName=None):
    """Compile the template. This method is automatically called by
    Template.__init__ when it is provided with 'file' or 'source' args.

    USERS SHOULD *NEVER* CALL THIS METHOD THEMSELVES. Use Template.compile
    instead.
    """
    if compilerSettings is Unspecified:
        # fall back to settings extracted from the source/file itself
        compilerSettings = self._getCompilerSettings(source, file) or {}
    mainMethodName = mainMethodName or self._CHEETAH_defaultMainMethodName
    self._fileMtime = None
    self._fileDirName = None
    self._fileBaseName = None
    # NOTE: 'basestring' makes this branch Python 2 only; records the
    # template file's resolved path and mtime for later freshness checks
    if file and isinstance(file, basestring):
        file = self.serverSidePath(file)
        self._fileMtime = os.path.getmtime(file)
        self._fileDirName, self._fileBaseName = os.path.split(file)
        self._filePath = file
    templateClass = self.compile(source, file,
                                 moduleName=moduleName,
                                 mainMethodName=mainMethodName,
                                 compilerSettings=compilerSettings,
                                 keepRefToGeneratedCode=True)
    # morph this very instance into the freshly compiled class
    self.__class__ = templateClass
    # must initialize it so instance attributes are accessible
    templateClass.__init__(self,
                           #_globalSetVars=self._CHEETAH__globalSetVars,
                           #_preBuiltSearchList=self._CHEETAH__searchList
                           )
    if not hasattr(self, 'transaction'):
        self.transaction = None
def _handleCheetahInclude(self, srcArg, trans=None, includeFrom='file', raw=False):
    """Called at runtime to handle #include directives.

    srcArg      -- a file path / file-like object (includeFrom='file') or a
                   source string; also used as the cache key for the include
    trans       -- the transaction the include writes its output to
    includeFrom -- 'file' or anything else (treated as raw source string)
    raw         -- if true the content is inserted verbatim instead of
                   being compiled as a nested template
    """
    _includeID = srcArg
    # compile/load each include only once per template instance
    if _includeID not in self._CHEETAH__cheetahIncludes:
        if not raw:
            if includeFrom == 'file':
                source = None
                if type(srcArg) in StringTypes:
                    if hasattr(self, 'serverSidePath'):
                        file = path = self.serverSidePath(srcArg)
                    else:
                        file = path = os.path.normpath(srcArg)
                else:
                    file = srcArg ## a file-like object
            else:
                source = srcArg
                file = None
            # @@TR: might want to provide some syntax for specifying the
            # Template class to be used for compilation so compilerSettings
            # can be changed.
            compiler = self._getTemplateAPIClassForIncludeDirectiveCompilation(source, file)
            nestedTemplateClass = compiler.compile(source=source, file=file)
            # the nested template shares this template's searchList and
            # global #set vars
            nestedTemplate = nestedTemplateClass(_preBuiltSearchList=self.searchList(),
                                                 _globalSetVars=self._CHEETAH__globalSetVars)
            # Set the inner template filters to the initial filter of the
            # outer template:
            # this is the only really safe way to use
            # filter='WebSafe'.
            nestedTemplate._CHEETAH__initialFilter = self._CHEETAH__initialFilter
            nestedTemplate._CHEETAH__currentFilter = self._CHEETAH__initialFilter
            self._CHEETAH__cheetahIncludes[_includeID] = nestedTemplate
        else:
            if includeFrom == 'file':
                path = self.serverSidePath(srcArg)
                self._CHEETAH__cheetahIncludes[_includeID] = self.getFileContents(path)
            else:
                self._CHEETAH__cheetahIncludes[_includeID] = srcArg
    ## emit: nested templates respond(); raw strings are written directly
    if not raw:
        self._CHEETAH__cheetahIncludes[_includeID].respond(trans)
    else:
        trans.response().write(self._CHEETAH__cheetahIncludes[_includeID])
def _getTemplateAPIClassForIncludeDirectiveCompilation(self, source, file):
    """Pick the Template subclass used to compile #include directives.

    This abstraction lets included templates be compiled with different
    compiler settings than the parent used.  (*source* and *file* are
    accepted for interface compatibility but not consulted here.)
    """
    cls = self.__class__
    return cls if issubclass(cls, Template) else Template
## functions for using templates as CGI scripts
def webInput(self, names, namesMulti=(), default='', src='f',
             defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False):
    """Method for importing web transaction variables in bulk.

    This works for GET/POST fields both in Webware servlets and in CGI
    scripts, and for cookies and session variables in Webware servlets. If
    you try to read a cookie or session variable in a CGI script, you'll get
    a RuntimeError. 'In a CGI script' here means 'not running as a Webware
    servlet'. If the CGI environment is not properly set up, Cheetah will
    act like there's no input.

    The public method provided is:

        def webInput(self, names, namesMulti=(), default='', src='f',
            defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False):

    This method places the specified GET/POST fields, cookies or session
    variables into a dictionary, which is both returned and put at the
    beginning of the searchList. It handles:

        * single vs multiple values
        * conversion to integer or float for specified names
        * default values/exceptions for missing or bad values
        * printing a snapshot of all values retrieved for debugging

    All the 'default*' and 'bad*' arguments have 'use or raise' behavior,
    meaning that if they're a subclass of Exception, they're raised. If
    they're anything else, that value is substituted for the missing/bad
    value.

    The simplest usage is:

        #silent $webInput(['choice'])
        $choice

        dic = self.webInput(['choice'])
        write(dic['choice'])

    Both these examples retrieve the GET/POST field 'choice' and print it.
    If you leave off the '#silent', all the values would be printed too. But
    a better way to preview the values is

        #silent $webInput(['name'], $debug=1)

    because this pretty-prints all the values inside HTML <PRE> tags.

    ** KLUDGE: 'debug' is supposed to insert into the template output, but it
    wasn't working so I changed it to a 'print' statement. So the debugging
    output will appear wherever standard output is pointed, whether at the
    terminal, in a Webware log file, or whatever. ***

    Since we didn't specify any conversions, the value is a string. It's a
    'single' value because we specified it in 'names' rather than
    'namesMulti'. Single values work like this:

        * If one value is found, take it.
        * If several values are found, choose one arbitrarily and ignore the rest.
        * If no values are found, use or raise the appropriate 'default*' value.

    Multi values work like this:

        * If one value is found, put it in a list.
        * If several values are found, leave them in a list.
        * If no values are found, use the empty list ([]). The 'default*'
          arguments are *not* consulted in this case.

    Example: assume 'days' came from a set of checkboxes or a multiple combo
    box on a form, and the user chose 'Monday', 'Tuesday' and 'Thursday'.

        #silent $webInput([], ['days'])
        The days you chose are: #slurp
        #for $day in $days
        $day #slurp
        #end for

        dic = self.webInput([], ['days'])
        write('The days you chose are: ')
        for day in dic['days']:
            write(day + ' ')

    Both these examples print: 'The days you chose are: Monday Tuesday Thursday'.

    By default, missing strings are replaced by '' and missing/bad numbers
    by zero. (A 'bad number' means the converter raised an exception for
    it, usually because of non-numeric characters in the value.) This
    mimics Perl/PHP behavior, and simplifies coding for many applications
    where missing/bad values *should* be blank/zero. In those relatively
    few cases where you must distinguish between empty-string/zero on the
    one hand and missing/bad on the other, change the appropriate
    'default*' and 'bad*' arguments to something like:

        * None
        * another constant value
        * $NonNumericInputError/self.NonNumericInputError
        * $ValueError/ValueError

    (NonNumericInputError is defined in this class and is useful for
    distinguishing between bad input vs a TypeError/ValueError thrown for
    some other reason.)

    Here's an example using multiple values to schedule newspaper
    deliveries. 'checkboxes' comes from a form with checkboxes for all the
    days of the week. The days the user previously chose are preselected.
    The user checks/unchecks boxes as desired and presses Submit. The value
    of 'checkboxes' is a list of checkboxes that were checked when Submit
    was pressed. Our task now is to turn on the days the user checked, turn
    off the days he unchecked, and leave on or off the days he didn't
    change.

        dic = self.webInput([], ['dayCheckboxes'])
        wantedDays = dic['dayCheckboxes'] # The days the user checked.
        for day, on in self.getAllValues():
            if not on and wantedDays.has_key(day):
                self.TurnOn(day)
                # ... Set a flag or insert a database record ...
            elif on and not wantedDays.has_key(day):
                self.TurnOff(day)
                # ... Unset a flag or delete a database record ...

    'source' allows you to look up the variables from a number of different
    sources:
        'f'   fields (CGI GET/POST parameters)
        'c'   cookies
        's'   session variables
        'v'   'values', meaning fields or cookies

    In many forms, you're dealing only with strings, which is why the
    'default' argument is third and the numeric arguments are banished to
    the end. But sometimes you want automatic number conversion, so that
    you can do numeric comparisons in your templates without having to
    write a bunch of conversion/exception handling code. Example:

        #silent $webInput(['name', 'height:int'])
        $name is $height cm tall.
        #if $height >= 300
        Wow, you're tall!
        #else
        Pshaw, you're short.
        #end if

        dic = self.webInput(['name', 'height:int'])
        name = dic[name]
        height = dic[height]
        write('%s is %s cm tall.' % (name, height))
        if height > 300:
            write('Wow, you're tall!')
        else:
            write('Pshaw, you're short.')

    To convert a value to a number, suffix ':int' or ':float' to the name.
    The method will search first for a 'height:int' variable and then for a
    'height' variable. (It will be called 'height' in the final
    dictionary.) If a numeric conversion fails, use or raise 'badInt' or
    'badFloat'. Missing values work the same way as for strings, except the
    default is 'defaultInt' or 'defaultFloat' instead of 'default'.

    If a name represents an uploaded file, the entire file will be read into
    memory. For more sophisticated file-upload handling, leave that name
    out of the list and do your own handling, or wait for
    Cheetah.Utils.UploadFileMixin.

    This only works in a subclass that also inherits from Webware's Servlet or
    HTTPServlet. Otherwise you'll get an AttributeError on 'self.request'.

    EXCEPTIONS: ValueError if 'source' is not one of the stated characters.
    TypeError if a conversion suffix is not ':int' or ':float'.

    FUTURE EXPANSION: a future version of this method may allow source
    cascading; e.g., 'vs' would look first in 'values' and then in session
    variables.

    Meta-Data
    ================================================================================
    Author: Mike Orr <iron@mso.oz.net>
    License: This software is released for unlimited distribution under the
             terms of the MIT license. See the LICENSE file.
    Version: $Revision: 1.186 $
    Start Date: 2002/03/17
    Last Revision Date: $Date: 2008/03/10 04:48:11 $
    """
    src = src.lower()
    # CGI mode = not running under Webware/WebKit
    isCgi = not self._CHEETAH__isControlledByWebKit
    if isCgi and src in ('f', 'v'):
        # parse the CGI input once per process and cache it module-wide
        global _formUsedByWebInput
        if _formUsedByWebInput is None:
            _formUsedByWebInput = cgi.FieldStorage()
        source, func = 'field', _formUsedByWebInput.getvalue
    elif isCgi and src == 'c':
        raise RuntimeError("can't get cookies from a CGI script")
    elif isCgi and src == 's':
        raise RuntimeError("can't get session variables from a CGI script")
    elif isCgi and src == 'v':
        # NOTE(review): unreachable -- the first branch already consumes
        # isCgi and src == 'v'
        source, func = 'value', self.request().value
    elif isCgi and src == 's':
        # NOTE(review): unreachable -- the 's' RuntimeError branch above
        # already consumed this case
        source, func = 'session', self.request().session().value
    elif src == 'f':
        source, func = 'field', self.request().field
    elif src == 'c':
        source, func = 'cookie', self.request().cookie
    elif src == 'v':
        source, func = 'value', self.request().value
    elif src == 's':
        source, func = 'session', self.request().session().value
    else:
        raise TypeError("arg 'src' invalid")
    # NOTE(review): 'sources' is assigned but never used in this method
    sources = source + 's'
    converters = {
        '': _Converter('string', None, default, default ),
        'int': _Converter('int', int, defaultInt, badInt ),
        'float': _Converter('float', float, defaultFloat, badFloat), }
    #pprint.pprint(locals()); return {}
    dic = {} # Destination.
    for name in names:
        k, v = _lookup(name, func, False, converters)
        dic[k] = v
    for name in namesMulti:
        k, v = _lookup(name, func, True, converters)
        dic[k] = v
    # At this point, 'dic' contains all the keys/values we want to keep.
    # We could split the method into a superclass
    # method for Webware/WebwareExperimental and a subclass for Cheetah.
    # The superclass would merely 'return dic'. The subclass would
    # 'dic = super(ThisClass, self).webInput(names, namesMulti, ...)'
    # and then the code below.
    if debug:
        print("<PRE>\n" + pprint.pformat(dic) + "\n</PRE>\n\n")
    self.searchList().insert(0, dic)
    return dic
T = Template # Short and sweet for debugging at the >>> prompt.
# Record every attribute name the Template API itself defines; __init__
# warns when a user-supplied searchList dict would shadow one of these
# (a NameMapper collision).
Template.Reserved_SearchList = set(dir(Template))
def genParserErrorFromPythonException(source, file, generatedPyCode, exception):
    """Convert a Python exception raised while exec'ing compiler-generated
    code into a Cheetah ParseError pointing back at the template.

    source          -- the original Cheetah template source
    file            -- template file path (used as the error's filename)
    generatedPyCode -- the Python module code the compiler produced
    exception       -- the exception raised while running that code

    Builds a small numbered excerpt of the generated Python around the
    failing line, then tries to map it back to a Cheetah line/column by
    grepping for 'line N, col M' markers the compiler embeds.
    NOTE: Python 2 only as written ('unicode', 'StringIO' module).
    """
    #print dir(exception)
    filename = isinstance(file, (str, unicode)) and file or None
    # capture the traceback text so we can regex the line number out of it
    sio = StringIO.StringIO()
    traceback.print_exc(1, sio)
    formatedExc = sio.getvalue()
    if hasattr(exception, 'lineno'):
        pyLineno = exception.lineno
    else:
        # fall back to parsing the 'File ..., line N' traceback line
        pyLineno = int(re.search('[ \t]*File.*line (\d+)', formatedExc).group(1))
    lines = generatedPyCode.splitlines()
    # collect up to 3 lines of context before the failing line
    # (pyLineno is 1-based; 'lines' is 0-based)
    prevLines = [] # (i, content)
    for i in range(1, 4):
        if pyLineno-i <=0:
            break
        prevLines.append( (pyLineno+1-i, lines[pyLineno-i]) )
    # ... and up to 3 lines after it
    nextLines = [] # (i, content)
    for i in range(1, 4):
        if not pyLineno+i < len(lines):
            break
        nextLines.append( (pyLineno+i, lines[pyLineno+i]) )
    nextLines.reverse()
    report = 'Line|Python Code\n'
    report += '----|-------------------------------------------------------------\n'
    while prevLines:
        lineInfo = prevLines.pop()
        report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
    if hasattr(exception, 'offset'):
        # caret under the offending column (offset comes from SyntaxError)
        report += ' '*(3+(exception.offset or 0)) + '^\n'
    while nextLines:
        lineInfo = nextLines.pop()
        report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
    message = [
        "Error in the Python code which Cheetah generated for this template:",
        '='*80,
        '',
        str(exception),
        '',
        report,
        '='*80,
        ]
    # try to recover the Cheetah source position from the traceback text
    cheetahPosMatch = re.search('line (\d+), col (\d+)', formatedExc)
    if cheetahPosMatch:
        lineno = int(cheetahPosMatch.group(1))
        col = int(cheetahPosMatch.group(2))
        #if hasattr(exception, 'offset'):
        #    col = exception.offset
        message.append('\nHere is the corresponding Cheetah code:\n')
    else:
        lineno = None
        col = None
        # second chance: scan the generated code near the failure for an
        # embedded 'line N, col M' marker (a guess, as the message says)
        cheetahPosMatch = re.search('line (\d+), col (\d+)',
                                    '\n'.join(lines[max(pyLineno-2, 0):]))
        if cheetahPosMatch:
            lineno = int(cheetahPosMatch.group(1))
            col = int(cheetahPosMatch.group(2))
            message.append('\nHere is the corresponding Cheetah code.')
            message.append('** I had to guess the line & column numbers,'
                           ' so they are probably incorrect:\n')
    message = '\n'.join(message)
    reader = SourceReader(source, filename=filename)
    return ParseError(reader, message, lineno=lineno, col=col)
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
from glob import glob
import os
from os import listdir
import os.path
import re
from tempfile import mktemp
def _escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
return escapeRE.sub(r'\\\1', txt)
def findFiles(*args, **kw):
    """Recursively collect all files matching a glob pattern.

    Thin wrapper around the FileFinder class; see its docstring for the
    accepted arguments, etc."""
    finder = FileFinder(*args, **kw)
    return finder.files()
def replaceStrInFiles(files, theStr, repl):
    """Replace every literal occurrence of *theStr* with *repl* in each file
    of *files*; returns a dict describing the matches found.

    This is string.replace() on a multi-file basis: *theStr* is escaped so
    it is matched verbatim, then the work is delegated to FindAndReplace
    (see its docstring for details)."""
    literalPattern = _escapeRegexChars(theStr)
    return FindAndReplace(files, literalPattern, repl).results()
def replaceRegexInFiles(files, pattern, repl):
    """Replace every match of regex *pattern* with *repl* in each file of
    *files*; returns a dict describing the matches found.

    This is re.sub on a multi-file basis, implemented as a wrapper around
    the FindAndReplace class (see its docstring for details)."""
    job = FindAndReplace(files, pattern, repl)
    return job.results()
##################################################
## CLASSES
class FileFinder:
    """Walk a directory tree collecting every file that matches one of the
    given glob patterns, skipping ignored directory names/paths."""

    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS', '.svn'),
                 ignoreDirs=(),
                 ):
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        self.walkDirTree(rootPath)

    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Iteratively traverse the tree rooted at *dir*, processing each
        directory that survives filterDir()."""
        pending = [dir]
        while pending:
            current = pending.pop()
            ## process this dir, then queue its surviving sub-dirs
            self.processDir(current)
            for baseName in listdir(current):
                fullPath = join(current, baseName)
                if isdir(fullPath) and self.filterDir(baseName, fullPath):
                    pending.append(fullPath)

    def filterDir(self, baseName, fullPath):
        """A hook for filtering out certain dirs."""
        if baseName in self._ignoreBasenames:
            return False
        return fullPath not in self._ignoreDirs

    def processDir(self, dir, glob=glob):
        """Add every file in *dir* matching one of the glob patterns."""
        for pattern in self._globPatterns:
            self._files.extend(glob(os.path.join(dir, pattern)))

    def files(self):
        """The accumulated list of matching file paths."""
        return self._files
class _GenSubberFunc:
"""Converts a 'sub' string in the form that one feeds to re.sub (backrefs,
groups, etc.) into a function that can be used to do the substitutions in
the FindAndReplace class."""
backrefRE = re.compile(r'\\([1-9][0-9]*)')
groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')
def __init__(self, replaceStr):
self._src = replaceStr
self._pos = 0
self._codeChunks = []
self.parse()
def src(self):
return self._src
def pos(self):
return self._pos
def setPos(self, pos):
self._pos = pos
def atEnd(self):
return self._pos >= len(self._src)
def advance(self, offset=1):
self._pos += offset
def readTo(self, to, start=None):
if start == None:
start = self._pos
self._pos = to
if self.atEnd():
return self._src[start:]
else:
return self._src[start:to]
## match and get methods
def matchBackref(self):
return self.backrefRE.match(self.src(), self.pos())
def getBackref(self):
m = self.matchBackref()
self.setPos(m.end())
return m.group(1)
def matchGroup(self):
return self.groupRE.match(self.src(), self.pos())
def getGroup(self):
m = self.matchGroup()
self.setPos(m.end())
return m.group(1)
## main parse loop and the eat methods
def parse(self):
while not self.atEnd():
if self.matchBackref():
self.eatBackref()
elif self.matchGroup():
self.eatGroup()
else:
self.eatStrConst()
def eatStrConst(self):
startPos = self.pos()
while not self.atEnd():
if self.matchBackref() or self.matchGroup():
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
self.addChunk(repr(strConst))
def eatBackref(self):
self.addChunk( 'm.group(' + self.getBackref() + ')' )
def eatGroup(self):
self.addChunk( 'm.group("' + self.getGroup() + '")' )
def addChunk(self, chunk):
self._codeChunks.append(chunk)
## code wrapping methods
def codeBody(self):
return ', '.join(self._codeChunks)
def code(self):
return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())
def subberFunc(self):
exec(self.code())
return subber
class FindAndReplace:
    """Find and replace all instances of 'patternOrRE' with 'replacement' for
    each file in the 'files' list. This is a multi-file version of re.sub().

    'patternOrRE' can be a raw regex pattern or
    a regex object as generated by the re module. 'replacement' can be any
    string that would work with patternOrRE.sub(replacement, fileContents).

    NOTE: Python 2 only as written ('basestring', os.popen3 -- removed in
    Python 3).
    """
    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):
        # accept either a pattern string or a pre-compiled regex object
        if isinstance(patternOrRE, basestring):
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # a replacement string is compiled into a subber function; anything
        # else is assumed to already be a callable suitable for re.sub
        if isinstance(replacement, basestring):
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement
        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults
        ## see if we should use pgrep to do the file matching
        # SECURITY NOTE(review): 'pattern' is interpolated into a shell
        # command below -- a malicious pattern could inject shell commands.
        # mktemp() is also race-prone; tempfile.mkstemp would be safer.
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)
        self._run()

    def results(self):
        """Per-file report of the matches found (see _subDispatcher)."""
        return self._results

    def _run(self):
        """Scan each file; rewrite in place only when the pattern matches."""
        regex = self._regex
        subber = self._subDispatcher
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.
            self._currFile = file
            found = False
            # 'orig' doubles as a has-the-file-been-read flag via locals()
            if 'orig' in locals():
                del orig
            if self._usePgrep:
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                # only read the file here if the pgrep path skipped reading it
                if 'orig' not in locals():
                    orig = open(file).read()
                new = regex.sub(subber, orig)
                open(file, 'w').write(new)

    def _subDispatcher(self, match):
        """re.sub callback: record match metadata, then delegate to the
        real subber for the replacement text."""
        if self._recordResults:
            if self._currFile not in self._results:
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents': match.group(),
                                   'start': match.start(),
                                   'end': match.end(),
                                   }
                                  )
        return self._subber(match)
class SourceFileStats:
    """Compute per-file and aggregate line statistics (code / blank /
    comment / total) for a set of source files."""

    _fileStats = None  # mapping: file name -> stats dict

    def __init__(self, files):
        self._fileStats = stats = {}
        for fileName in files:
            stats[fileName] = self.getFileStats(fileName)

    def rawStats(self):
        """The per-file stats mapping built in __init__."""
        return self._fileStats

    def summary(self):
        """Aggregate the per-file stats into a single totals dict."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']
        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats

    def printStats(self):
        pass

    def getFileStats(self, fileName):
        """Classify each line of *fileName* as comment, blank, or code.

        BUG FIX: the original patterns were ``\\s#.*$`` and ``\\s$``, which
        require exactly one leading whitespace character -- so a comment at
        column 0 (or indented by more than one space without matching at
        the first char) and completely empty lines were miscounted as code.
        ``\\s*`` makes the leading whitespace optional.
        """
        codeLines = 0
        blankLines = 0
        commentLines = 0
        commentLineRe = re.compile(r'\s*#.*$')
        blankLineRe = re.compile(r'\s*$')
        # use 'with' so the file handle is closed deterministically
        with open(fileName) as f:
            lines = f.read().splitlines()
        totalLines = len(lines)
        for line in lines:
            if commentLineRe.match(line):
                commentLines += 1
            elif blankLineRe.match(line):
                blankLines += 1
            else:
                codeLines += 1
        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats
| Python |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Sep 8 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
wx.ID_Window = 1000
wx.ID_Window_StatusBar = 1001
wx.ID_Window_MenuBar = 1002
wx.ID_Window_Quit = 1003
wx.ID_Window_SplitterWindow_LeftPanel = 1004
###########################################################################
## Class Window
###########################################################################
class Window ( wx.Frame ):
    """Main application frame (wxFormBuilder-generated).

    Layout: a status bar, a File->Quit menu, and a horizontal splitter
    whose left pane holds a grey canvas panel and whose right pane holds
    a vertical column of buttons.  Generated code -- do not hand-edit;
    regenerate with wxFormBuilder instead.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_Window, title = u"Klein", pos = wx.DefaultPosition, size = wx.Size( 705,238 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        # status bar and menu bar (File -> Quit)
        self.mStatusBar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_Window_StatusBar )
        self.mMenuBar = wx.MenuBar( 0 )
        self.mFile = wx.Menu()
        self.mQuit = wx.MenuItem( self.mFile, wx.ID_Window_Quit, u"Quit", wx.EmptyString, wx.ITEM_NORMAL )
        self.mFile.AppendItem( self.mQuit )
        self.mMenuBar.Append( self.mFile, u"File" )
        self.SetMenuBar( self.mMenuBar )
        # splitter: canvas panel on the left, scrolling button column on
        # the right (note: the generator's "Left"/"Right" sizer names are
        # swapped relative to the panels they manage)
        mSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.mSplitterWindow = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D )
        self.mSplitterWindow.Bind( wx.EVT_IDLE, self.mSplitterWindowOnIdle )
        self.mLeftPanel = wx.Panel( self.mSplitterWindow, wx.ID_Window_SplitterWindow_LeftPanel, wx.DefaultPosition, wx.DefaultSize, 0 )
        mRightSizer = wx.BoxSizer( wx.VERTICAL )
        self.mCanvasPanel = wx.Panel( self.mLeftPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.mCanvasPanel.SetBackgroundColour( wx.Colour( 128, 128, 128 ) )
        mRightSizer.Add( self.mCanvasPanel, 1, wx.EXPAND |wx.ALL, 5 )
        self.mLeftPanel.SetSizer( mRightSizer )
        self.mLeftPanel.Layout()
        mRightSizer.Fit( self.mLeftPanel )
        self.mRightPanel = wx.Panel( self.mSplitterWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.VSCROLL )
        mLeftSizer = wx.BoxSizer( wx.VERTICAL )
        # generated column of buttons (m_button38 .. m_button58)
        self.m_button38 = wx.Button( self.mRightPanel, wx.ID_ANY, u"1", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button38, 0, wx.ALL, 5 )
        self.m_button39 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button39, 0, wx.ALL, 5 )
        self.m_button40 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button40, 0, wx.ALL, 5 )
        self.m_button41 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button41, 0, wx.ALL, 5 )
        self.m_button42 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button42, 0, wx.ALL, 5 )
        self.m_button43 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button43, 0, wx.ALL, 5 )
        self.m_button44 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button44, 0, wx.ALL, 5 )
        self.m_button45 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button45, 0, wx.ALL, 5 )
        self.m_button46 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button46, 0, wx.ALL, 5 )
        self.m_button47 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button47, 0, wx.ALL, 5 )
        self.m_button48 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button48, 0, wx.ALL, 5 )
        self.m_button49 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button49, 0, wx.ALL, 5 )
        self.m_button50 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button50, 0, wx.ALL, 5 )
        self.m_button51 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button51, 0, wx.ALL, 5 )
        self.m_button52 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button52, 0, wx.ALL, 5 )
        self.m_button53 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button53, 0, wx.ALL, 5 )
        self.m_button54 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button54, 0, wx.ALL, 5 )
        self.m_button55 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button55, 0, wx.ALL, 5 )
        self.m_button56 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button56, 0, wx.ALL, 5 )
        self.m_button57 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button57, 0, wx.ALL, 5 )
        self.m_button58 = wx.Button( self.mRightPanel, wx.ID_ANY, u"-1", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button58, 0, wx.ALL, 5 )
        self.mRightPanel.SetSizer( mLeftSizer )
        self.mRightPanel.Layout()
        mLeftSizer.Fit( self.mRightPanel )
        self.mSplitterWindow.SplitVertically( self.mLeftPanel, self.mRightPanel, 486 )
        mSizer.Add( self.mSplitterWindow, 1, wx.EXPAND, 5 )
        self.SetSizer( mSizer )
        self.Layout()
        self.Centre( wx.BOTH )

    def __del__( self ):
        pass

    def mSplitterWindowOnIdle( self, event ):
        # one-shot idle handler: pin the sash at 486 px once the window
        # is realized, then unbind so resizing works normally afterwards
        self.mSplitterWindow.SetSashPosition( 486 )
        self.mSplitterWindow.Unbind( wx.EVT_IDLE )
# Entry point: create the wx application, show the generated frame and
# start the event loop (blocks until the window is closed).
app = wx.App()
win = Window(None)
win.Show(True)
app.MainLoop()
| Python |
#! /usr/bin/env python
"""module to run timings test code
Modules to time-test can be specified in two ways
* bench modules can be automatically collected from directory where this
file is present if they are cython modules with sources having extension
`.pyx`. The modules are compiled if they are not already
* modules (python/cython module name w/o extension) can be passed as
commandline arguments to time-test the specified modules
The bench modules are special modules having a callable `bench` defined
which returns a list of a dict having string (name of bench) keys and
float (time taken) values. The list is only a way to group different tests.
The modules may implement the bench function in whichever way they deem fit.
To run bench modules which need mpi to execute multiple processes,
name the bench module as "mpi<num_procs>_<bench_name>.pyx",
replacing <num_procs> with the number of processes in which to run the bench
and <bench_name> with the name of you would use for the file.
An easy way to run in different number of processes is to create symlinks with
different names.
The result of a parallel bench is that returned by the bench function
of the root process.
The results of all the bench tests are displayed in a tabular format
Any output from the test modules is redirected to file `bench.log`
Output from mpi runs is redirected to `mpirunner.log.<rank>`
"""
import os
import sys
import traceback
import subprocess
import pickle
# local relative import
import setup
def list_pyx_extensions(path):
    """Return sorted module names for the non-private ``.pyx`` files in *path*.

    Fix: the original test ``f[-3:] == 'pyx'`` also matched file names
    that merely end in the letters ``pyx`` without the dot (e.g.
    ``foopyx``), and then mis-stripped four characters from them.  Only
    names with a real ``.pyx`` extension are accepted now; names
    starting with an underscore are treated as private and skipped.
    """
    return sorted(f[:-len('.pyx')] for f in os.listdir(path)
                  if f.endswith('.pyx') and not f.startswith('_'))
def mpirun(bench_name, num_procs):
    """Run *bench_name* under mpiexec with *num_procs* processes and
    return the unpickled result that mpirunner.py writes to stdout."""
    cmd = ['mpiexec', '-n', str(num_procs), sys.executable,
           'mpirunner.py', 'p', bench_name]
    output = subprocess.check_output(cmd)
    return pickle.loads(output)
def run(extns=None, dirname=None, num_runs=1):
"""run the benchmarks in the modules given
`extns` is names of python modules to benchmark (None => all cython
extensions in dirname)
`dirname` is the directory where the modules are found (None implies
current directory
`num_runs` is the number of times to run the tests, the minimum value
is reported over all the runs
"""
if dirname is None:
dirname = os.path.abspath(os.curdir)
olddir = os.path.abspath(os.curdir)
os.chdir(dirname)
if extns is None:
extns = list_pyx_extensions(os.curdir)
print 'Running benchmarks:', ', '.join(extns)
# this is needed otherwise setup will take arguments and do something else
sys.argvold = sys.argv[:]
sys.argv = sys.argv[:1]
# compile the bench .pyx files
setup.compile_extns(extns, dirname)#, [os.path.join(dirname,'..','..')])
logfile = open('bench.log', 'w')
outtext = ''
for bench_name in extns:
stdout_orig = sys.stdout
stderr_orig = sys.stderr
sys.stdout = sys.stderr = logfile
mpi = False
if bench_name.startswith('mpi'):
mpi = True
num_procs = int(bench_name.lstrip('mpi').split('_')[0])
try:
# bench to be run in mpi
if mpi:
res = mpirun(bench_name, num_procs)
# normal single process bench
else:
bench_mod = __import__(bench_name)
res = bench_mod.bench()
except:
stderr_orig.write('Failure running bench %s\n' %(bench_name))
traceback.print_exc(file=stderr_orig)
continue
# take minimum over `num_runs` runs
for i in range(num_runs-1):
# bench to be run in mpi
if mpi:
r = mpirun(bench_name, num_procs)
# normal single process bench
else:
r = bench_mod.bench()
for jn,j in enumerate(res):
for k,v in j.items():
j[k] = min(v, r[jn].get(k, 1e1000))
sys.stdout = stdout_orig
sys.stderr = stderr_orig
if mpi:
s = bench_name.split('_',1)[1]+' %d\n'%num_procs
s += '#'*len(s)
print s
outtext += s + '\n'
else:
s = bench_name + '\n' + '#'*len(bench_name)
print s
outtext += s + '\n'
for func in res:
for k in sorted(func.keys()):
s = k.ljust(40) + '\t%g'%func[k]
print s
outtext += s + '\n'
print
outtext += '\n'
logfile.write(outtext)
logfile.close()
sys.argv = sys.argvold
os.chdir(olddir)
if __name__ == '__main__':
    # CLI: -h/--help prints usage; extra arguments name the bench
    # extensions to run; no arguments runs everything found here.
    print sys.argv
    if '-h' in sys.argv or '--help' in sys.argv:
        print '''usage:
    python setup.py [extension1, [extension2, [...]]]
    runs the bench extensions present in the current directory
    '''
    elif len(sys.argv) > 1:
        # run specified extensions
        run(sys.argv[1:])
    else:
        # run all extensions found in current directory
        run()
| Python |
""" some utility function for use in load_balance benchmark """
# MPI imports
from mpi4py import MPI
# World communicator plus this process's size and rank, used as
# module-level globals throughout this file.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
import sys
import os
from os.path import join, exists
import traceback
from optparse import OptionParser
# logging imports
import logging
# local imports
from pysph.base.kernels import CubicSplineKernel
from pysph.base.point import Point
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.base.particle_array import ParticleArray
from pysph.parallel.load_balancer import get_load_balancer_class
from pysph.solver.particle_generator import DensityComputationMode as Dcm
from pysph.solver.particle_generator import MassComputationMode as Mcm
from pysph.solver.basic_generators import RectangleGenerator, LineGenerator
# Resolve the concrete load-balancer class once at import time.
LoadBalancer = get_load_balancer_class()
def parse_options(args=None):
"""parse commandline options from given list (default=sys.argv[1:])"""
# default values
square_width = 1.0
np_d = 50
particle_spacing = square_width / np_d
particle_radius = square_width / np_d
sph_interpolations = 1
num_iterations = 10
num_load_balance_iterations = 500
max_cell_scale = 2.0
op = OptionParser()
op.add_option('-t', '--type', dest='type', default="square",
help='type of problem to load_balance, one of "dam_break" or "square"')
op.add_option('-w', '--width', dest='square_width',
metavar='SQUARE_WIDTH')
op.add_option('-s', '--spacing', dest='particle_spacing',
metavar='PARTICLE_SPACING')
op.add_option('-r', '--radius', dest='particle_radius',
metavar='PARTICLE_RADIUS')
op.add_option('-d', '--destdir', dest='destdir',
metavar='DESTDIR')
op.add_option('-i', '--sph-interpolations', dest='sph_interpolations',
metavar='SPH_INTERPOLATIONS')
op.add_option('-n', '--num-iterations', dest='num_iterations',
metavar='NUM_ITERATIONS')
op.add_option('-l', '--num-load-balance-iterations',
dest='num_load_balance_iterations',
metavar='NUM_LOAD_BALANCE_ITERATIONS')
op.add_option('-o', '--write-vtk',
action="store_true", default=False, dest='write_vtk',
help='write a vtk file after all iterations are done')
op.add_option('-v', '--verbose',
action="store_true", default=True, dest='verbose',
help='print large amounts of debug information')
op.add_option('-c', '--max-cell-scale', dest='max_cell_scale',
metavar='MAX_CELL_SCALE',
help='specify the ratio of largest cell to smallest cell')
# parse the input arguments
args = op.parse_args()
options = args[0]
# setup the default values or the ones passed from the command line
if options.destdir is None:
print 'No destination directory specified. Using current dir'
options.destdir = ''
options.destdir = os.path.abspath(options.destdir)
# create the destination directory if it does not exist.
if not exists(options.destdir):
os.mkdir(options.destdir)
# logging
options.logger = logger = logging.getLogger()
log_filename = os.path.join(options.destdir, 'load_balance.log.%d'%rank)
if options.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level, filename=log_filename, filemode='w')
#logger.addHandler(logging.StreamHandler())
# read the square_width to use
if options.square_width == None:
logger.warn('Using default square width of %f'%(square_width))
options.square_width = square_width
options.square_width = float(options.square_width)
# read the particle spacing
if options.particle_spacing == None:
logger.warn('Using default particle spacing of %f'%(particle_spacing))
options.particle_spacing = particle_spacing
options.particle_spacing = float(options.particle_spacing)
# read the particle radius
if options.particle_radius == None:
logger.warn('Using default particle radius of %f'%(particle_radius))
options.particle_radius = particle_radius
options.particle_radius = float(options.particle_radius)
# read the number of sph-interpolations to perform
if options.sph_interpolations == None:
logger.warn('Using default number of SPH interpolations %f'%(
sph_interpolations))
options.sph_interpolations = sph_interpolations
options.sph_interpolations = int(sph_interpolations)
# read the total number of iterations to run
if options.num_iterations == None:
logger.warn('Using default number of iterations %d'%(num_iterations))
options.num_iterations = num_iterations
options.num_iterations = int(options.num_iterations)
if options.num_load_balance_iterations == None:
logger.warn('Running %d initial load balance iterations'
%(num_load_balance_iterations))
options.num_load_balance_iterations = num_load_balance_iterations
options.num_load_balance_iterations = int(num_load_balance_iterations)
if options.max_cell_scale == None:
logger.warn('Using default max cell scale of %f'%(max_cell_scale))
options.max_cell_scale = max_cell_scale
options.max_cell_scale = float(options.max_cell_scale)
# one node zero - write this setting into a file.
if rank == 0:
settings_file = options.destdir + '/settings.dat'
f = open(settings_file, 'w')
f.write('Run with command : %s\n'%(sys.argv))
f.write('destdir = %s\n'%(options.destdir))
f.write('square_width = %f\n'%(options.square_width))
f.write('particle_spacing = %f\n'%(options.particle_spacing))
f.write('particle_radius = %f\n'%(options.particle_radius))
f.write('sph_interpolations = %d\n'%(options.sph_interpolations))
f.write('num_iterations = %d\n'%(options.num_iterations))
f.close()
return options
def create_particles(options):
    """Build the particle arrays for the requested problem type.

    Returns a list of ParticleArray objects: one array for
    ``options.type == "square"`` (a filled square block of fluid), or
    two arrays ``[dam_fluid, dam_wall]`` for ``"dam_break"``.  Only
    rank 0 actually generates particles; the other ranks create empty
    arrays with the same property names so the parallel cell manager
    can bin them.
    NOTE(review): an unrecognised ``options.type`` falls through and
    returns None -- confirm callers only pass "square"/"dam_break".
    """
    if options.type == "square":
        # create the square block of particles.
        start_point = Point(0, 0, 0)
        end_point = Point(options.square_width, options.square_width, 0)
        parray = ParticleArray()
        if rank == 0:
            # generate the filled square on the root process only
            rg = RectangleGenerator(start_point=start_point,
                                    end_point=end_point,
                                    particle_spacing_x1=options.particle_spacing,
                                    particle_spacing_x2=options.particle_spacing,
                                    density_computation_mode=Dcm.Set_Constant,
                                    particle_density=1000.0,
                                    mass_computation_mode=Mcm.Compute_From_Density,
                                    particle_h=options.particle_radius,
                                    kernel=CubicSplineKernel(2),
                                    filled=True)
            tmp = rg.get_particles()
            parray.append_parray(tmp)
        if rank != 0:
            # add some necessary properties to the particle array.
            parray.add_property({'name':'x'})
            parray.add_property({'name':'y'})
            parray.add_property({'name':'z'})
            parray.add_property({'name':'h', 'default':options.particle_radius})
            parray.add_property({'name':'rho', 'default':1000.})
            parray.add_property({'name':'pid'})
            parray.add_property({'name':'_tmp', 'default':0.0})
            parray.add_property({'name':'m'})
        else:
            parray.add_property({'name':'_tmp'})
            parray.add_property({'name':'pid', 'default':0.0})
        return [parray]
    elif options.type == "dam_break":
        dam_wall = ParticleArray()
        dam_fluid = ParticleArray()
        if rank == 0:
            # geometry constants for the dam-break setup
            radius = 0.2
            dam_width=10.0
            dam_height=7.0
            solid_particle_h=radius
            dam_particle_spacing=radius/9.
            solid_particle_mass=1.0
            origin_x=origin_y=0.0
            fluid_particle_h=radius
            fluid_density=1000.
            fluid_column_height=3.0
            fluid_column_width=2.0
            fluid_particle_spacing=radius
            # generate the left wall - a line
            lg = LineGenerator(particle_mass=solid_particle_mass,
                               mass_computation_mode=Mcm.Set_Constant,
                               density_computation_mode=Dcm.Ignore,
                               particle_h=solid_particle_h,
                               start_point=Point(0, 0, 0),
                               end_point=Point(0, dam_height, 0),
                               particle_spacing=dam_particle_spacing)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)
            # generate one half of the base
            lg.start_point = Point(dam_particle_spacing, 0, 0)
            lg.end_point = Point(dam_width/2, 0, 0)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)
            # generate particles for the left column of fluid.
            rg = RectangleGenerator(
                start_point=Point(origin_x+2.0*solid_particle_h,
                                  origin_y+2.0*solid_particle_h,
                                  0.0),
                end_point=Point(origin_x+2.0*solid_particle_h+fluid_column_width,
                                origin_y+2.0*solid_particle_h+fluid_column_height, 0.0),
                particle_spacing_x1=fluid_particle_spacing,
                particle_spacing_x2=fluid_particle_spacing,
                density_computation_mode=Dcm.Set_Constant,
                mass_computation_mode=Mcm.Compute_From_Density,
                particle_density=1000.,
                particle_h=fluid_particle_h,
                kernel=CubicSplineKernel(2),
                filled=True)
            dam_fluid = rg.get_particles()
            # generate the right wall - a line
            lg = LineGenerator(particle_mass=solid_particle_mass,
                               mass_computation_mode=Mcm.Set_Constant,
                               density_computation_mode=Dcm.Ignore,
                               particle_h=solid_particle_h,
                               start_point=Point(dam_width, 0, 0),
                               end_point=Point(dam_width, dam_height, 0),
                               particle_spacing=dam_particle_spacing)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)
            # generate the right half of the base
            lg.start_point = Point(dam_width/2.+dam_particle_spacing, 0, 0)
            lg.end_point = Point(dam_width, 0, 0)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)
        for parray in [dam_fluid, dam_wall]:
            if rank != 0:
                # add some necessary properties to the particle array.
                parray.add_property({'name':'x'})
                parray.add_property({'name':'y'})
                parray.add_property({'name':'z'})
                parray.add_property({'name':'h', 'default':options.particle_radius})
                parray.add_property({'name':'rho', 'default':1000.})
                parray.add_property({'name':'pid'})
                parray.add_property({'name':'_tmp', 'default':0.0})
                parray.add_property({'name':'m'})
            else:
                parray.add_property({'name':'_tmp'})
                parray.add_property({'name':'pid', 'default':0.0})
        return [dam_fluid, dam_wall]
def create_cell_manager(options):
    """Create and initialize a 2D ParallelCellManager holding the
    particle arrays produced by create_particles(options).

    Automatic load balancing is disabled on the manager; a LoadBalancer
    is attached manually so the benchmark can drive balancing itself.
    """
    print 'creating cell manager', options
    # create a parallel cell manager.
    cell_manager = ParallelCellManager(arrays_to_bin=[],
                                       max_cell_scale=options.max_cell_scale,
                                       dimension=2,
                                       load_balancing=False,
                                       initialize=False)
    # enable load balancing
    cell_manager.load_balancer = LoadBalancer(parallel_cell_manager=cell_manager)
    cell_manager.load_balancer.skip_iteration = 1
    cell_manager.load_balancer.threshold_ratio = 10.
    for i,pa in enumerate(create_particles(options)):
        cell_manager.arrays_to_bin.append(pa)
        print 'parray %d:'%i, pa.get_number_of_particles()
    cell_manager.initialize()
    print 'num_particles', cell_manager.get_number_of_particles()
    return cell_manager
def get_lb_args():
    """Return the list of load-balancer keyword-argument dicts that the
    benchmark exercises, in a fixed order."""
    configs = []
    configs.append(dict(method='normal'))
    configs.append(dict(method='normal', adaptive=True))
    configs.append(dict(method='serial'))
    configs.append(dict(method='serial', adaptive=True))
    configs.append(dict(method='serial', distr_func='auto'))
    configs.append(dict(method='serial', distr_func='geometric'))
    configs.append(dict(method='serial_mkmeans', max_iter=200, c=0.3, t=0.2,
                        tr=0.8, u=0.4, e=3, er=6, r=2.0))
    configs.append(dict(method='serial_sfc', sfc_func_name='morton'))
    configs.append(dict(method='serial_sfc', sfc_func_name='hilbert'))
    configs.append(dict(method='serial_metis'))
    return configs
def get_desc_name(lbargs):
    """Build a short underscore-joined label for a load-balancer
    argument dict: method, then 'a' if adaptive, then the SFC function
    name and the redistribution function name when present."""
    parts = [lbargs.get('method', '')]
    if lbargs.get('adaptive', False):
        parts.append('a')
    for key in ('sfc_func_name', 'distr_func'):
        value = lbargs.get(key)
        if value:
            parts.append(value)
    return '_'.join(parts)
| Python |
''' Module to run bench modules which need to be run in mpi
This module imports the given module to run, and returns the result
of the bench functions of the modules. Also results are written to
mpirunner.log file
Usage:
1. Print the result in formatted form:
$ mpiexec -n <num_procs> python mpirunner.py <bench_name>
2. Print the result dictionary in pickled form (useful in automation):
$ mpiexec -n <num_procs> python mpirunner.py p <bench_name>
'''
from mpi4py import MPI
import sys
import pickle
# Rank and size of this process in the world communicator (module globals).
rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()
def mpirun(args=None):
    """Import and run a bench module, log per-rank output, and emit the
    result.

    *args* semantics:
    * None  -- we were spawned via MPI; the bench name is broadcast from
      the parent communicator and the result is sent back over it;
    * ['p', name] -- print the result pickled to stdout (automation);
    * ['i', name] -- run without redirecting stdout/stderr;
    * [name]      -- run and print the formatted result table.

    Per-rank output goes to mpirunner.log.<rank>; rank 0 also writes the
    formatted table to mpirunner.log.
    """
    pkl = False
    redir_op = True
    if args is None:
        comm = MPI.Comm.Get_parent()
        #rank = comm.Get_rank()
        bench_name = comm.bcast('', root=0)
    else:
        if args[0] == 'p':
            pkl = True
            bench_name = args[1]
        elif args[0] == 'i':
            redir_op = False
            bench_name = args[1]
        else:
            bench_name = args[0]
    # capture the bench's own chatter in a per-rank log file
    logfile = open('mpirunner.log.%d'%rank, 'w')
    stdout_orig = sys.stdout
    stderr_orig = sys.stderr
    if redir_op:
        sys.stdout = sys.stderr = logfile
    bench_mod = __import__(bench_name)
    res = bench_mod.bench()
    sys.stdout = stdout_orig
    sys.stderr = stderr_orig
    logfile.close()
    # only the root rank reports results
    if rank != 0: return
    outtext = ''
    # heading: strip the mpi<num>_ prefix, append the world size
    s = bench_name.split('_',1)[1]+' %d\n'%size
    s += '#'*len(s)
    outtext += s + '\n'
    for func in res:
        for k in sorted(func.keys()):
            s = k.ljust(40) + '\t%g'%func[k]
            outtext += s + '\n'
    outtext += '\n'
    logfile = open('mpirunner.log', 'w')
    logfile.write(outtext)
    logfile.close()
    # deliver the result to the caller (parent comm, pickle, or text)
    if args is None:
        comm.send(res, 0)
    elif pkl:
        sys.stdout.write(pickle.dumps(res))
    else:
        sys.stdout.write(outtext)
if __name__ == '__main__':
    # command-line entry: forward the arguments (see mpirun docstring)
    mpirun(sys.argv[1:])
| Python |
""" Time comparison for the Cython and OpenCL integrators.
We use the NBody integration example as the benchmark. Here, and all
neighbor locator is used. The setup consists of four points at the
vertices of the unit square in 2D.
"""
import numpy
from time import time
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import pyopencl as cl
# Neighbor-locator / domain-manager type aliases.
AllPairLocatorCython = base.NeighborLocatorType.NSquareNeighborLocator
AllPairLocatorOpenCL = base.OpenCLNeighborLocatorType.AllPairNeighborLocator
DomainManager = base.DomainManagerType.DomainManager
# constants
np = 1024
tf = 1.0
dt = 0.01
nsteps = tf/dt
# generate the particles (random positions and masses in [0, 1))
x = numpy.random.random(np)
y = numpy.random.random(np)
z = numpy.random.random(np)
m = numpy.random.random(np)
precision = "single"
ctx = solver.create_some_context()
# identical particle data for the two backends
pa1 = base.get_particle_array(name="cython", x=x, y=y, z=z, m=m)
pa2 = base.get_particle_array(name="opencl", cl_precision=precision,
                              x=x, y=y, z=z, m=m)
particles1 = base.Particles([pa1,], locator_type=AllPairLocatorCython)
particles2 = base.CLParticles([pa2, ])
kernel = base.CubicSplineKernel(dim=2)
# create the cython solver
solver1 = solver.Solver(dim=2, integrator_type=solver.EulerIntegrator)
solver1.add_operation(solver.SPHIntegration(
    sph.NBodyForce.withargs(), on_types=[0], updates=['u','v'],
    id="force")
)
solver1.add_operation_step(types=[0])
solver1.setup(particles1)
solver1.set_final_time(tf)
solver1.set_time_step(dt)
# print frequency > number of steps: suppress intermediate output
solver1.set_print_freq(nsteps + 1)
solver1.set_output_directory(".")
# create the OpenCL solver
solver2 = solver.Solver(dim=2, integrator_type=solver.EulerIntegrator)
solver2.add_operation(solver.SPHIntegration(
    sph.NBodyForce.withargs(), on_types=[0], updates=['u','v'],
    id="force")
)
solver2.add_operation_step(types=[0])
solver2.set_cl(True)
solver2.setup(particles2)
solver2.set_final_time(tf)
solver2.set_time_step(dt)
solver2.set_print_freq(nsteps + 1)
solver2.set_output_directory(".")
# time the two solvers end-to-end
t1 = time()
solver1.solve()
cython_time = time() - t1
t1 = time()
solver2.solve()
opencl_time = time() - t1
pa2.read_from_buffer()
#print pa1.x - pa2.x
# report the mean absolute position difference and the timings
print sum(abs(pa1.x - pa2.x))/np
print "=================================================================="
print "OpenCL execution time = %g s"%opencl_time
print "Cython execution time = %g s"%cython_time
print "Speedup = %g"%(cython_time/opencl_time)
| Python |
""" Benchmark for the PySPH neighbor search functions. """
import sys
import numpy
import time
#PySPH imports
import pysph.base.api as base
def get_points(np = 10000):
    """Return (x, y, z, h) arrays for *np* random particles in the cube
    [-1, 1]^3, with a uniform smoothing length h = 3 * sqrt(4/np)."""
    coords = [numpy.random.random(np)*2.0 - 1.0 for _ in range(3)]
    x, y, z = coords
    # h ~ 2*vol_per_particle
    # rad ~ (2-3)*h => rad ~ 6*h
    vol_per_particle = pow(4.0/np, 0.5)
    radius = 6 * vol_per_particle
    smoothing = numpy.ones_like(x) * radius * 0.5
    return x, y, z, smoothing
def get_particle_array(x, y, z, h):
    """Wrap the coordinate and smoothing-length arrays in a
    ``base.ParticleArray``."""
    arrays = {'x': x, 'y': y, 'z': z, 'h': h}
    pdict = dict((name, {'name': name, 'data': data})
                 for name, data in arrays.items())
    return base.ParticleArray(**pdict)
def bin_particles(pa):
    """Bin the particles in *pa* by wrapping it in a ``base.Particles``
    container (binning happens during construction) and return it."""
    return base.Particles([pa])
def cache_neighbors(particles):
    """Force neighbor caching for the first particle array held by
    *particles* (radius scale 2.0, lookup for particle index 0)."""
    array = particles.arrays[0]
    locator = particles.get_neighbor_particle_locator(array, array, 2.0)
    locator.py_get_nearest_particles(0)
def get_stats(particles):
    """Print cell-manager statistics: cell size, number of cells, and
    the average and maximum particle counts per cell.

    NOTE(review): `_np/ncells` truncates under Python 2 integer
    division, and ncells == 0 would raise ZeroDivisionError -- confirm
    against intended usage.
    """
    cd = particles.cell_manager.cells_dict
    ncells = len(cd)
    np_max = 0
    _np = 0
    # iteritems(): this module targets Python 2
    for cid, cell in cd.iteritems():
        np = cell.index_lists[0].length
        _np += np
        if np > np_max:
            np_max = np
    print "\n\n\n##############################################################"
    print "CELL MANAGER DATA"
    print "CellManager cell size ", particles.cell_manager.cell_size
    print "Number of cells %d\t Particles/cell (avg) %f "%(ncells, _np/ncells),
    print " Maximum %d particles"%(np_max)
if __name__ == '__main__':
    # optional trailing command-line argument: the number of particles
    if len(sys.argv) > 1:
        np = sys.argv[-1]
        x,y,z,h = get_points(np = int(sys.argv[-1]))
        pa = get_particle_array(x,y,z,h)
    else:
        x,y,z,h = get_points()
        pa = get_particle_array(x,y,z,h)
    np = pa.get_number_of_particles()
    print "Number of particles: ", np
    # same radius formula as get_points(), recomputed for the report
    vol_per_particle = pow(4.0/np, 0.5)
    radius = 6 * vol_per_particle
    print "Search Radius %f. "%(radius)
    # time the binning step
    t = time.time()
    particles = bin_particles(pa)
    bt = time.time() - t
    print "Time for binning: %f s" %(bt)
    # time the neighbor-caching step
    t = time.time()
    cache_neighbors(particles)
    ct = time.time() - t
    print "Time for caching neighbors: %f s" %(ct)
    print "\nTotal time %fs"%(bt + ct)
    get_stats(particles)
| Python |
from setuptools import find_packages, setup
from Cython.Distutils import build_ext
from numpy.distutils.extension import Extension
# Cython extension definitions for the NNPS benchmarks; both modules are
# compiled as C++ with -O3 and all warnings enabled.
ext_modules = [Extension("cython_nnps", ["cython_nnps.pyx"],
                         language="c++",
                         extra_compile_args=["-O3", "-Wall"]
                         ),
               Extension("nnps_bench", ["nnps_bench.pyx"],
                         language="c++",
                         extra_compile_args=["-O3", "-Wall"]
                         ),
               ]
# build via Cython's build_ext command
setup(
    name = "Cython NNPS",
    cmdclass = {'build_ext':build_ext},
    ext_modules=ext_modules
)
| Python |
"""This module compiles the specified (all) the cython .pyx files
in the specified (current) directory into python extensions
"""
import sys
import os
from setuptools import setup
from Cython.Distutils import build_ext
from numpy.distutils.extension import Extension
import numpy
def get_spcl_extn(extn):
    """Special-case extensions with specific requirements.

    Currently a no-op hook: the C++ extensions are recognised but no
    extra sources are attached (the cPoint.cpp append is disabled).
    Always returns *extn* unchanged.
    """
    cpp_extensions = ('sph_funcs', 'nnps', 'cell', 'cpp_vs_pyx',
                      'cpp_extensions', 'nnps_brute_force')
    if extn.name in cpp_extensions:
        # hook kept for future use, e.g.:
        # extn.sources.append('cPoint.cpp')
        pass
    return extn
def compile_extns(extensions=None, dirname=None, inc_dirs=None):
    """compile cython extensions

    `extensions` is list of extensions to compile (None => all pyx files)
    `dirname` is directory in which extensions are found (None = current directory)
    `inc_dirs` is list of additional cython include directories

    Temporarily rewrites sys.argv to run distutils as
    ``build_ext --inplace`` and changes into *dirname*; both are
    restored before returning.
    """
    if dirname is None:
        dirname = os.path.abspath(os.curdir)
    olddir = os.path.abspath(os.curdir)
    os.chdir(dirname)
    if extensions is None:
        extensions = sorted([f[:-4] for f in os.listdir(os.curdir) if f.endswith('.pyx')])
    if inc_dirs is None:
        inc_dirs = []
    # always include the sibling 'source' directory of the parent dir
    inc_dirs.append(os.path.join(os.path.split(os.path.abspath(os.path.curdir))[0],'source'))
    print inc_dirs
    # save/restore sys.argv so setup() sees only 'build_ext --inplace'
    sys.argvold = sys.argv[:]
    sys.argv = [__file__, 'build_ext','--inplace']
    inc_dirs = [numpy.get_include()] + inc_dirs
    cargs = []#'-O3']
    # extension modules
    extns = []
    for extnname in extensions:
        extn = Extension(extnname, [extnname+".pyx"], include_dirs=inc_dirs,
                         language='c++', extra_compile_args=cargs)
        extn = get_spcl_extn(extn)
        extns.append(extn)
    setup(name='PySPH-bench',
          ext_modules = extns,
          include_package_data = True,
          cmdclass={'build_ext': build_ext},
          )
    os.chdir(olddir)
    sys.argv = sys.argvold
if __name__ == '__main__':
    # CLI: -h/--help prints usage; extra arguments name the extensions
    # to build; no arguments builds everything in the current directory.
    if '-h' in sys.argv or '--help' in sys.argv:
        print '''usage:
    python setup.py [extension1, [extension2, [...]]]
    compiles the cython extensions present in the current directory
    '''
    elif len(sys.argv) > 1:
        # compile specified extensions
        compile_extns(sys.argv[1:])
    else:
        # compile all extensions found in current directory
        compile_extns()
| Python |
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
import numpy
import time
import pyopencl as cl
# Type aliases for the OpenCL domain manager and neighbor locator.
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
# number of particles
np = 1 << 20
# number of times a single calc is evaluated
neval = 5
# 1D particle distribution on [0, 1] with uniform mass and h = 2*m
x = numpy.linspace(0,1,np)
m = numpy.ones_like(x) * (x[1] - x[0])
h = 2*m
rho = numpy.ones_like(x)
# get the OpenCL context and device. Default to the first device
platforms = cl.get_platforms()
for platform in platforms:
    print("===============================================================")
    print("Platform name:", platform.name)
    print("Platform profile:", platform.profile)
    print("Platform vendor:", platform.vendor)
    print("Platform version:", platform.version)
    devices = platform.get_devices()
    for device in devices:
        ctx = cl.Context([device])
        print("===============================================================")
        print("Device name:", device.name)
        print("Device type:", cl.device_type.to_string(device.type))
        print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
        print("Device max clock speed:", device.max_clock_frequency, 'MHz')
        print("Device compute units:", device.max_compute_units)
        # benchmark double precision only where the device supports it
        precision_types = ['single']
        device_extensions = device.get_info(cl.device_info.EXTENSIONS)
        if 'cl_khr_fp64' in device_extensions:
            precision_types.append('double')
        for prec in precision_types:
            print "--------------------------------------------------------"
            print """Summation Density for %g million particles using %s precision"""%(np/1e6, prec)
            pa = base.get_particle_array(cl_precision=prec,
                                         name="test", x=x,h=h,m=m,rho=rho)
            particles = base.Particles(arrays=[pa,])
            cl_particles = base.CLParticles(
                arrays=[pa,],
                domain_manager_type=CLDomain.LinkedListManager,
                cl_locator_type=CLLocator.LinkedListSPHNeighborLocator)
            kernel = base.CubicSplineKernel(dim=1)
            # create the function
            func = sph.SPHRho.get_func(pa,pa)
            # create the CLCalc object
            t1 = time.time()
            cl_calc = sph.CLCalc(particles=cl_particles,
                                 sources=[pa,],
                                 dest=pa,
                                 kernel=kernel,
                                 funcs=[func,],
                                 updates=['rho'] )
            cl_calc.reset_arrays = True
            # setup OpenCL for PySPH
            cl_calc.setup_cl(ctx)
            cl_setup_time = time.time() - t1
            # create a normal calc object
            t1 = time.time()
            calc = sph.SPHCalc(particles=particles, sources=[pa,], dest=pa,
                               kernel=kernel, funcs=[func,], updates=['rho'] )
            cython_setup_time = time.time() - t1
            # evaluate pysph on the OpenCL device!
            t1 = time.time()
            for i in range(neval):
                cl_calc.sph()
            cl_elapsed = time.time() - t1
            # Read the buffer contents
            t1 = time.time()
            pa.read_from_buffer()
            read_elapsed = time.time() - t1
            print "\nPyOpenCL setup time = %g s"%(cl_setup_time)
            print "PyOpenCL execution time = %g s" %(cl_elapsed)
            print "PyOpenCL buffer transfer time: %g s "%(read_elapsed)
            cl_rho = pa.get('_tmpx').copy()
            # Do the same thing with Cython.
            t1 = time.time()
            for i in range(neval):
                calc.sph('_tmpx')
            cython_elapsed = time.time() - t1
            print "Cython setup time = %g s"%(cython_setup_time)
            print "Cython execution time = %g s" %(cython_elapsed)
            cython_total = cython_setup_time + cython_elapsed
            opencl_total = cl_setup_time + cl_elapsed + read_elapsed
            # Compare the results
            cython_rho = pa.get('_tmpx')
            diff = sum(abs(cl_rho - cython_rho))
            print "sum(abs(cl_rho - cy_rho))/np = ", diff/np
            print "Execution speedup: %g"%(cython_elapsed/cl_elapsed)
            print "Overall Speedup: %g "%(cython_total/opencl_total)
| Python |
""" Benchmark example for binning particles in Cython and OpenCL """
import numpy
import numpy.random as random
from time import time
import pysph.base.api as base
import pysph.solver.api as solver
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
# number of points
np = 2**20
# number of times to bin
nbins = 3
# generate the point set
x = random.random(np)
y = random.random(np)
z = random.random(np)
vol_per_particle = numpy.power(1.0/np, 1.0/3.0)
h = numpy.ones_like(x) * 2 * vol_per_particle
precision = "single"
ctx = solver.create_some_context()
pa = base.get_particle_array(name="test", cl_precision=precision,
x=x, y=y, z=z, h=h)
t1 = time()
for i in range(nbins):
particles = base.Particles([pa,])
pa.set_dirty(True)
cython_time = time() - t1
t1 = time()
cl_particles = base.CLParticles(
arrays=[pa,],
domain_manager_type=CLDomain.LinkedListManager,
cl_locator_type=CLLocator.LinkedListSPHNeighborLocator)
cl_particles.setup_cl(ctx)
domain_manager = cl_particles.domain_manager
for i in range(nbins - 1):
domain_manager.is_dirty = False
domain_manager.update()
opencl_time = time() - t1
print "================================================================"
print "Binning for %d particles using % s precision"%(np, precision)
print "PyOpenCL time = %g s"%(opencl_time)
print "Cython time = %g s"%(cython_time)
print "Speedup = %g"%(cython_time/opencl_time)
| Python |
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
import numpy
import time
import pyopencl as cl
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
# number of particles
np = 16384
# number of times a single calc is evaluated
neval = 1
x = numpy.linspace(0,1,np)
m = numpy.ones_like(x) * (x[1] - x[0])
h = 2*m
rho = numpy.ones_like(x)
# get the OpenCL context and device. Default to the first device
platforms = cl.get_platforms()
for platform in platforms:
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
devices = platform.get_devices()
for device in devices:
ctx = cl.Context([device])
print("===============================================================")
print("Device name:", device.name)
print("Device type:", cl.device_type.to_string(device.type))
print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
print("Device max clock speed:", device.max_clock_frequency, 'MHz')
print("Device compute units:", device.max_compute_units)
precision_types = ['single']
device_extensions = device.get_info(cl.device_info.EXTENSIONS)
if 'cl_khr_fp64' in device_extensions:
precision_types.append('double')
for prec in precision_types:
print "--------------------------------------------------------"
print "NBody force comparison using %s precision"%(prec)
pa = base.get_particle_array(cl_precision=prec,
name="test", x=x,h=h,m=m,rho=rho)
particles = base.Particles(
arrays=[pa,],
locator_type=Locator.NSquareNeighborLocator)
cl_particles = base.CLParticles(
arrays=[pa,],
domain_manager_type=CLDomain.DomainManager,
cl_locator_type=CLLocator.AllPairNeighborLocator)
kernel = base.CubicSplineKernel(dim=1)
# create the function
func = sph.NBodyForce.get_func(pa,pa)
# create the CLCalc object
t1 = time.time()
cl_calc = sph.CLCalc(particles=cl_particles,
sources=[pa,],
dest=pa,
kernel=kernel,
funcs=[func,],
updates=['u','v','w'] )
# setup OpenCL for PySPH
cl_calc.setup_cl(ctx)
cl_setup_time = time.time() - t1
# create a normal calc object
t1 = time.time()
calc = sph.SPHCalc(particles=particles, sources=[pa,], dest=pa,
kernel=kernel, funcs=[func,],
updates=['u','v','w'] )
cython_setup_time = time.time() - t1
# evaluate pysph on the OpenCL device!
t1 = time.time()
for i in range(neval):
cl_calc.sph()
cl_elapsed = time.time() - t1
# Read the buffer contents
t1 = time.time()
pa.read_from_buffer()
read_elapsed = time.time() - t1
print "\nPyOpenCL setup time = %g s"%(cl_setup_time)
print "PyOpenCL execution time = %g s" %(cl_elapsed)
print "PyOpenCL buffer transfer time: %g s "%(read_elapsed)
cl_rho = pa.get('_tmpx').copy()
# Do the same thing with Cython.
t1 = time.time()
for i in range(neval):
calc.sph('_tmpx')
cython_elapsed = time.time() - t1
print "Cython setup time = %g s"%(cython_setup_time)
print "Cython execution time = %g s" %(cython_elapsed)
cython_total = cython_setup_time + cython_elapsed
opencl_total = cl_setup_time + cl_elapsed + read_elapsed
# Compare the results
cython_rho = pa.get('_tmpx')
diff = sum(abs(cl_rho - cython_rho))
print "sum(abs(cl_rho - cy_rho))/np = ", diff/np
print "Execution speedup: %g"%(cython_elapsed/cl_elapsed)
print "Overall Speedup: %g "%(cython_total/opencl_total)
| Python |
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
import textwrap
import re
import pydoc
from StringIO import StringIO
from warnings import warn
class Reader(object):
    """A cursor-based reader over a list of text lines."""

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str or list of str
           String with lines separated by '\n', or the lines themselves.

        """
        # Normalize to a list of lines and rewind the cursor.
        if isinstance(data, list):
            self._lines = data
        else:
            self._lines = data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._lines[n]

    def reset(self):
        """Rewind the cursor to the first line."""
        self._pos = 0

    def read(self):
        """Return the current line and advance; return '' at EOF."""
        if self.eof():
            return ''
        line = self._lines[self._pos]
        self._pos += 1
        return line

    def seek_next_non_empty_line(self):
        """Advance the cursor past any blank lines."""
        while self._pos < len(self._lines) and not self._lines[self._pos].strip():
            self._pos += 1

    def eof(self):
        return self._pos >= len(self._lines)

    def read_to_condition(self, condition_func):
        """Consume lines until ``condition_func(line)`` is true.

        Returns the consumed lines (the matching line is not consumed).
        """
        start = self._pos
        while not self.eof():
            if condition_func(self._lines[self._pos]):
                return self[start:self._pos]
            self._pos += 1
            if self.eof():
                return self[start:self._pos + 1]
        return []

    def read_to_next_empty_line(self):
        """Consume and return the next non-blank paragraph."""
        self.seek_next_non_empty_line()
        return self.read_to_condition(lambda line: not line.strip())

    def read_to_next_unindented_line(self):
        """Consume lines until one with content at column zero."""
        return self.read_to_condition(
            lambda line: line.strip() and len(line.lstrip()) == len(line))

    def peek(self, n=0):
        """Return the line ``n`` past the cursor without advancing."""
        if self._pos + n < len(self._lines):
            return self._lines[self._pos + n]
        return ''

    def is_empty(self):
        """True if every line is blank."""
        return not ''.join(self._lines).strip()
class NumpyDocString(object):
    """Parse a numpy-format docstring into named sections.

    Parsed content is exposed dict-style (``doc['Parameters']``,
    ``doc['Summary']``, ...); ``str(doc)`` re-renders the docstring.
    """

    def __init__(self, docstring):
        docstring = docstring.split('\n')

        # De-indent paragraph: strip the common leading whitespace so the
        # section parser sees left-aligned text.
        try:
            indent = min(len(s) - len(s.lstrip()) for s in docstring
                         if s.strip())
        except ValueError:
            # Entirely blank docstring.
            indent = 0

        for n, line in enumerate(docstring):
            docstring[n] = docstring[n][indent:]

        self._doc = Reader(docstring)
        self._parsed_data = {
            'Signature': '',
            'Summary': '',
            'Extended Summary': [],
            'Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'Other Parameters': [],
            'Attributes': [],
            'Methods': [],
            'See Also': [],
            'Notes': [],
            'References': '',
            'Examples': '',
            'index': {}
            }

        self._parse()

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        # ``in`` instead of the Python-2-only dict.has_key().
        if key not in self._parsed_data:
            warn("Unknown section %s" % key)
        else:
            self._parsed_data[key] = val

    def _is_at_section(self):
        """True if the cursor sits on a section header or index directive."""
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()   # e.g. Parameters
        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ----------
        return l2.startswith('-' * len(l1))

    def _strip(self, doc):
        """Drop leading and trailing blank lines from a list of lines."""
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc) - j]

    def _read_to_next_section(self):
        """Consume lines up to (not including) the next section header."""
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        """Yield (section-name, content-lines) pairs."""
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # BUG FIX: the original ``yield StopIteration`` handed the
                # class object itself to the caller, which crashed the
                # tuple-unpacking in _parse(); a truncated section simply
                # ends the iteration instead.
                return
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content):
        """Parse 'name : type' entries with indented descriptions."""
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            for n, line in enumerate(desc):
                desc[n] = line.strip()

            params.append((arg_name, arg_type, desc))

        return params

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, func_name3

        """
        functions = []
        current_func = None
        rest = []
        for line in content:
            if not line.strip():
                continue
            if ':' in line:
                if current_func:
                    functions.append((current_func, rest))
                r = line.split(':', 1)
                current_func = r[0].strip()
                r[1] = r[1].strip()
                if r[1]:
                    rest = [r[1]]
                else:
                    rest = []
            elif not line.startswith(' '):
                if current_func:
                    functions.append((current_func, rest))
                    current_func = None
                    rest = []
                if ',' in line:
                    for func in line.split(','):
                        func = func.strip()
                        if func:
                            functions.append((func, []))
                elif line.strip():
                    current_func = line.strip()
            elif current_func is not None:
                rest.append(line.strip())
        if current_func:
            functions.append((current_func, rest))
        return functions

    def _parse_index(self, section, content):
        """
        .. index: default
           :refguide: something, else, and more

        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        summary = self._doc.read_to_next_empty_line()
        summary_str = " ".join([s.strip() for s in summary]).strip()
        # Raw string: same pattern as before, without relying on '\w'
        # surviving as a literal backslash escape.
        if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
            self['Signature'] = summary_str
            if not self._is_at_section():
                self['Summary'] = self._doc.read_to_next_empty_line()
        else:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        self._doc.reset()
        self._parse_summary()

        for (section, content) in self._read_sections():
            if not section.startswith('..'):
                section = ' '.join([s.capitalize() for s in section.split(' ')])
            if section in ('Parameters', 'Attributes', 'Methods',
                           'Returns', 'Raises', 'Warns'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name) * symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            for param, param_type, desc in self[name]:
                out += ['%s : %s' % (param, param_type)]
                out += self._str_indent(desc)
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc in self['See Also']:
            if func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                # Run entries without descriptions together on one line.
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent(desc)
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default', '')]
        # .items() works on both Python 2 and 3 (iteritems() is 2-only).
        for section, references in idx.items():
            if section == 'default':
                continue
            out += [' :%s: %s' % (section, ', '.join(references))]
        return out

    def __str__(self, func_role=''):
        out = []

        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()

        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)

        out += self._str_see_also(func_role)

        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)

        out += self._str_index()

        return '\n'.join(out)
def indent(str, indent=4):
    """Prefix every line of `str` with `indent` spaces.

    ``None`` is treated as an empty single line (just the padding).
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
def header(text, style='-'):
    """Return `text` underlined (reST style) with the `style` character."""
    underline = style * len(text)
    return '%s\n%s\n' % (text, underline)
class FunctionDoc(NumpyDocString):
    """Parse and render the docstring of a function or method.

    Fixes over the original: Python-2-only ``except X, e`` syntax and
    ``dict.has_key`` replaced with forms valid on Python 2.6+ and 3;
    ``'\\*'`` escapes written as raw strings.
    """

    def __init__(self, func, role='func'):
        self._f = func
        self._role = role  # e.g. "func" or "meth"
        try:
            NumpyDocString.__init__(self, inspect.getdoc(func) or '')
        except ValueError as e:
            # Report the parse failure loudly but keep the object usable.
            print('*' * 78)
            print("ERROR: '%s' while parsing `%s`" % (e, self._f))
            print('*' * 78)

        if not self['Signature']:
            func, func_name = self.get_func()
            try:
                # try to read signature via introspection
                argspec = inspect.getargspec(func)
                argspec = inspect.formatargspec(*argspec)
                argspec = argspec.replace('*', r'\*')
                signature = '%s%s' % (func_name, argspec)
            except TypeError:
                # Builtins and other non-introspectable callables.
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        """Return ``(callable, name)`` for the wrapped object.

        For class-like objects, prefer ``__call__`` and fall back to
        ``__init__``.
        """
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if hasattr(self._f, '__class__') or inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ''
        func, func_name = self.get_func()
        signature = self['Signature'].replace('*', r'\*')

        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
                                          func_name)

        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out
class ClassDoc(NumpyDocString):
    """Parse and render the docstring of a class."""

    def __init__(self, cls, modulename='', func_doc=FunctionDoc):
        if not inspect.isclass(cls):
            raise ValueError("Initialise using a class. Got %r" % cls)
        self._cls = cls

        # Normalise the module prefix so it can be glued onto the class name.
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename
        self._name = cls.__name__
        self._func_doc = func_doc

        NumpyDocString.__init__(self, pydoc.getdoc(cls))

    @property
    def methods(self):
        """Names of the public, callable attributes of the class."""
        names = []
        for name, member in inspect.getmembers(self._cls):
            if not name.startswith('_') and callable(member):
                names.append(name)
        return names

    def __str__(self):
        out = super(ClassDoc, self).__str__() + "\n\n"
        # Per-method documentation output is intentionally disabled here.
        return out
| Python |
from cStringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize
from compiler_unparse import unparse
class Comment(object):
    """A contiguous block of comment lines."""

    # Discriminator consulted by CommentBlocker when merging tokens.
    is_comment = True

    def __init__(self, start_lineno, end_lineno, text):
        # First line number of the block (1-indexed).
        self.start_lineno = start_lineno
        # Last line number of the block, inclusive.
        self.end_lineno = end_lineno
        # Comment text, '#' markers included, leading spaces excluded.
        self.text = text

    def add(self, string, start, end, line):
        """Fold one more comment line into the block."""
        if start[0] < self.start_lineno:
            self.start_lineno = start[0]
        if end[0] > self.end_lineno:
            self.end_lineno = end[0]
        self.text += string

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__,
                                   self.start_lineno, self.end_lineno,
                                   self.text)
class NonComment(object):
    """A block of code lines carrying no standalone comments."""

    is_comment = False

    def __init__(self, start_lineno, end_lineno):
        self.start_lineno = start_lineno
        self.end_lineno = end_lineno

    def add(self, string, start, end, line):
        """Extend the block; tokens that are pure whitespace are ignored."""
        if not string.strip():
            return
        self.start_lineno = min(self.start_lineno, start[0])
        self.end_lineno = max(self.end_lineno, end[0])

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__,
                               self.start_lineno, self.end_lineno)
class CommentBlocker(object):
    """Pull contiguous comment blocks out of a Python token stream."""

    def __init__(self):
        # Dummy block so the first token always has something to extend.
        self.current_block = NonComment(0, 0)
        # Every block seen so far, in order of appearance.
        self.blocks = []
        # Maps the first line of a code block to the comment block above it.
        self.index = {}

    def process_file(self, file):
        """Tokenize an open file object and build the comment index.

        NOTE(review): relies on the Python 2 ``file.next`` iterator method.
        """
        for token in tokenize.generate_tokens(file.next):
            self.process_token(*token)
        self.make_index()

    def process_token(self, kind, string, start, end, line):
        """Route one token into the current block or open a new one."""
        if kind == tokenize.COMMENT:
            if self.current_block.is_comment:
                self.current_block.add(string, start, end, line)
            else:
                self.new_comment(string, start, end, line)
        else:
            if self.current_block.is_comment:
                self.new_noncomment(start[0], end[0])
            else:
                self.current_block.add(string, start, end, line)

    def new_noncomment(self, start_lineno, end_lineno):
        """Open a fresh code block (called when a comment block ends)."""
        block = NonComment(start_lineno, end_lineno)
        self.blocks.append(block)
        self.current_block = block

    def new_comment(self, string, start, end, line):
        """Open a comment block, unless the comment trails code on its line.

        A trailing comment merely extends the current code block.
        """
        prefix = line[:start[1]]
        if prefix.strip():
            # Trailing comment, not a comment block.
            self.current_block.add(string, start, end, line)
        else:
            block = Comment(start[0], end[0], string)
            self.blocks.append(block)
            self.current_block = block

    def make_index(self):
        """Map each code block's first line to the block preceding it."""
        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
            if not block.is_comment:
                self.index[block.start_lineno] = prev

    def search_for_comment(self, lineno, default=None):
        """Return the comment text just before `lineno`, else `default`."""
        if not self.index:
            self.make_index()
        block = self.index.get(lineno, None)
        return getattr(block, 'text', default)
def strip_comment_marker(text):
    """Strip leading '#' markers from a block of comment text and dedent."""
    stripped = [line.lstrip('#') for line in text.splitlines()]
    return textwrap.dedent('\n'.join(stripped))
def get_class_traits(klass):
    """ Yield all of the documentation for trait definitions on a class object.

    Yields (name, rhs-source, comment-doc) triples for each top-level
    assignment in the class body (Python 2 `compiler` AST).
    """
    # FIXME: gracefully handle errors here or in the caller?
    source = inspect.getsource(klass)
    blocker = CommentBlocker()
    blocker.process_file(StringIO(source))
    mod_ast = compiler.parse(source)
    class_ast = mod_ast.node.nodes[0]
    for node in class_ast.code.nodes:
        # FIXME: handle other kinds of assignments?
        if isinstance(node, compiler.ast.Assign):
            name = node.nodes[0].name
            rhs = unparse(node.expr).strip()
            doc = strip_comment_marker(
                blocker.search_for_comment(node.lineno, default=''))
            yield name, rhs, doc
| Python |
""" Turn compiler.ast structures back into executable python code.
The unparse method takes a compiler.ast tree and transforms it back into
valid python code. It is incomplete and currently only works for
import statements, function calls, function definitions, assignments, and
basic expressions.
Inspired by python-2.5-svn/Demo/parser/unparse.py
fixme: We may want to move to using _ast trees because the compiler for
them is about 6 times faster than compiler.compile.
"""
import sys
import cStringIO
from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
def unparse(ast, single_line_functions=False):
    """Render a compiler.ast tree back into python source text."""
    buffer = cStringIO.StringIO()
    UnparseCompilerAst(ast, buffer, single_line_functions)
    return buffer.getvalue().lstrip()
# Relative binding strengths, keyed by compiler.ast node class string;
# used by UnparseCompilerAst.__binary_op to decide when to parenthesize.
op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
class UnparseCompilerAst:
""" Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarged.
"""
#########################################################################
# object interface.
#########################################################################
def __init__(self, tree, file = sys.stdout, single_line_functions=False):
""" Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file.
"""
self.f = file
self._single_func = single_line_functions
self._do_indent = True
self._indent = 0
self._dispatch(tree)
self._write("\n")
self.f.flush()
#########################################################################
# Unparser private interface.
#########################################################################
### format, output, and dispatch methods ################################
def _fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
if self._do_indent:
self._write("\n"+" "*self._indent + text)
else:
self._write(text)
def _write(self, text):
"Append a piece of text to the current line."
self.f.write(text)
def _enter(self):
"Print ':', and increase the indentation."
self._write(": ")
self._indent += 1
def _leave(self):
"Decrease the indentation level."
self._indent -= 1
def _dispatch(self, tree):
"_dispatcher function, _dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self._dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
return
meth(tree)
#########################################################################
# compiler.ast unparsing methods.
#
# There should be one method per concrete grammar type. They are
# organized in alphabetical order.
#########################################################################
def _Add(self, t):
self.__binary_op(t, '+')
def _And(self, t):
self._write(" (")
for i, node in enumerate(t.nodes):
self._dispatch(node)
if i != len(t.nodes)-1:
self._write(") and (")
self._write(")")
def _AssAttr(self, t):
""" Handle assigning an attribute of an object
"""
self._dispatch(t.expr)
self._write('.'+t.attrname)
def _Assign(self, t):
""" Expression Assignment such as "a = 1".
This only handles assignment in expressions. Keyword assignment
is handled separately.
"""
self._fill()
for target in t.nodes:
self._dispatch(target)
self._write(" = ")
self._dispatch(t.expr)
if not self._do_indent:
self._write('; ')
def _AssName(self, t):
""" Name on left hand side of expression.
Treat just like a name on the right side of an expression.
"""
self._Name(t)
def _AssTuple(self, t):
""" Tuple on left hand side of an expression.
"""
# _write each elements, separated by a comma.
for element in t.nodes[:-1]:
self._dispatch(element)
self._write(", ")
# Handle the last one without writing comma
last_element = t.nodes[-1]
self._dispatch(last_element)
def _AugAssign(self, t):
""" +=,-=,*=,/=,**=, etc. operations
"""
self._fill()
self._dispatch(t.node)
self._write(' '+t.op+' ')
self._dispatch(t.expr)
if not self._do_indent:
self._write(';')
def _Bitand(self, t):
""" Bit and operation.
"""
for i, node in enumerate(t.nodes):
self._write("(")
self._dispatch(node)
self._write(")")
if i != len(t.nodes)-1:
self._write(" & ")
def _Bitor(self, t):
""" Bit or operation
"""
for i, node in enumerate(t.nodes):
self._write("(")
self._dispatch(node)
self._write(")")
if i != len(t.nodes)-1:
self._write(" | ")
def _CallFunc(self, t):
""" Function call.
"""
self._dispatch(t.node)
self._write("(")
comma = False
for e in t.args:
if comma: self._write(", ")
else: comma = True
self._dispatch(e)
if t.star_args:
if comma: self._write(", ")
else: comma = True
self._write("*")
self._dispatch(t.star_args)
if t.dstar_args:
if comma: self._write(", ")
else: comma = True
self._write("**")
self._dispatch(t.dstar_args)
self._write(")")
def _Compare(self, t):
self._dispatch(t.expr)
for op, expr in t.ops:
self._write(" " + op + " ")
self._dispatch(expr)
def _Const(self, t):
""" A constant value such as an integer value, 3, or a string, "hello".
"""
self._dispatch(t.value)
def _Decorators(self, t):
""" Handle function decorators (eg. @has_units)
"""
for node in t.nodes:
self._dispatch(node)
def _Dict(self, t):
self._write("{")
for i, (k, v) in enumerate(t.items):
self._dispatch(k)
self._write(": ")
self._dispatch(v)
if i < len(t.items)-1:
self._write(", ")
self._write("}")
def _Discard(self, t):
""" Node for when return value is ignored such as in "foo(a)".
"""
self._fill()
self._dispatch(t.expr)
def _Div(self, t):
self.__binary_op(t, '/')
def _Ellipsis(self, t):
self._write("...")
def _From(self, t):
""" Handle "from xyz import foo, bar as baz".
"""
# fixme: Are From and ImportFrom handled differently?
self._fill("from ")
self._write(t.modname)
self._write(" import ")
for i, (name,asname) in enumerate(t.names):
if i != 0:
self._write(", ")
self._write(name)
if asname is not None:
self._write(" as "+asname)
def _Function(self, t):
""" Handle function definitions
"""
if t.decorators is not None:
self._fill("@")
self._dispatch(t.decorators)
self._fill("def "+t.name + "(")
defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
for i, arg in enumerate(zip(t.argnames, defaults)):
self._write(arg[0])
if arg[1] is not None:
self._write('=')
self._dispatch(arg[1])
if i < len(t.argnames)-1:
self._write(', ')
self._write(")")
if self._single_func:
self._do_indent = False
self._enter()
self._dispatch(t.code)
self._leave()
self._do_indent = True
def _Getattr(self, t):
""" Handle getting an attribute of an object
"""
if isinstance(t.expr, (Div, Mul, Sub, Add)):
self._write('(')
self._dispatch(t.expr)
self._write(')')
else:
self._dispatch(t.expr)
self._write('.'+t.attrname)
def _If(self, t):
self._fill()
for i, (compare,code) in enumerate(t.tests):
if i == 0:
self._write("if ")
else:
self._write("elif ")
self._dispatch(compare)
self._enter()
self._fill()
self._dispatch(code)
self._leave()
self._write("\n")
if t.else_ is not None:
self._write("else")
self._enter()
self._fill()
self._dispatch(t.else_)
self._leave()
self._write("\n")
def _IfExp(self, t):
self._dispatch(t.then)
self._write(" if ")
self._dispatch(t.test)
if t.else_ is not None:
self._write(" else (")
self._dispatch(t.else_)
self._write(")")
def _Import(self, t):
""" Handle "import xyz.foo".
"""
self._fill("import ")
for i, (name,asname) in enumerate(t.names):
if i != 0:
self._write(", ")
self._write(name)
if asname is not None:
self._write(" as "+asname)
def _Keyword(self, t):
""" Keyword value assignment within function calls and definitions.
"""
self._write(t.name)
self._write("=")
self._dispatch(t.expr)
def _List(self, t):
self._write("[")
for i,node in enumerate(t.nodes):
self._dispatch(node)
if i < len(t.nodes)-1:
self._write(", ")
self._write("]")
def _Module(self, t):
if t.doc is not None:
self._dispatch(t.doc)
self._dispatch(t.node)
def _Mul(self, t):
self.__binary_op(t, '*')
def _Name(self, t):
self._write(t.name)
def _NoneType(self, t):
self._write("None")
def _Not(self, t):
self._write('not (')
self._dispatch(t.expr)
self._write(')')
def _Or(self, t):
self._write(" (")
for i, node in enumerate(t.nodes):
self._dispatch(node)
if i != len(t.nodes)-1:
self._write(") or (")
self._write(")")
def _Pass(self, t):
self._write("pass\n")
def _Printnl(self, t):
self._fill("print ")
if t.dest:
self._write(">> ")
self._dispatch(t.dest)
self._write(", ")
comma = False
for node in t.nodes:
if comma: self._write(', ')
else: comma = True
self._dispatch(node)
def _Power(self, t):
self.__binary_op(t, '**')
def _Return(self, t):
self._fill("return ")
if t.value:
if isinstance(t.value, Tuple):
text = ', '.join([ name.name for name in t.value.asList() ])
self._write(text)
else:
self._dispatch(t.value)
if not self._do_indent:
self._write('; ')
def _Slice(self, t):
self._dispatch(t.expr)
self._write("[")
if t.lower:
self._dispatch(t.lower)
self._write(":")
if t.upper:
self._dispatch(t.upper)
#if t.step:
# self._write(":")
# self._dispatch(t.step)
self._write("]")
def _Sliceobj(self, t):
for i, node in enumerate(t.nodes):
if i != 0:
self._write(":")
if not (isinstance(node, Const) and node.value is None):
self._dispatch(node)
def _Stmt(self, tree):
for node in tree.nodes:
self._dispatch(node)
def _Sub(self, t):
self.__binary_op(t, '-')
def _Subscript(self, t):
self._dispatch(t.expr)
self._write("[")
for i, value in enumerate(t.subs):
if i != 0:
self._write(",")
self._dispatch(value)
self._write("]")
def _TryExcept(self, t):
self._fill("try")
self._enter()
self._dispatch(t.body)
self._leave()
for handler in t.handlers:
self._fill('except ')
self._dispatch(handler[0])
if handler[1] is not None:
self._write(', ')
self._dispatch(handler[1])
self._enter()
self._dispatch(handler[2])
self._leave()
if t.else_:
self._fill("else")
self._enter()
self._dispatch(t.else_)
self._leave()
def _Tuple(self, t):
if not t.nodes:
# Empty tuple.
self._write("()")
else:
self._write("(")
# _write each elements, separated by a comma.
for element in t.nodes[:-1]:
self._dispatch(element)
self._write(", ")
# Handle the last one without writing comma
last_element = t.nodes[-1]
self._dispatch(last_element)
self._write(")")
def _UnaryAdd(self, t):
self._write("+")
self._dispatch(t.expr)
def _UnarySub(self, t):
self._write("-")
self._dispatch(t.expr)
def _With(self, t):
self._fill('with ')
self._dispatch(t.expr)
if t.vars:
self._write(' as ')
self._dispatch(t.vars.name)
self._enter()
self._dispatch(t.body)
self._leave()
self._write('\n')
def _int(self, t):
self._write(repr(t))
def __binary_op(self, t, symbol):
# Check if parenthesis are needed on left side and then dispatch
has_paren = False
left_class = str(t.left.__class__)
if (left_class in op_precedence.keys() and
op_precedence[left_class] < op_precedence[str(t.__class__)]):
has_paren = True
if has_paren:
self._write('(')
self._dispatch(t.left)
if has_paren:
self._write(')')
# Write the appropriate symbol for operator
self._write(symbol)
# Check if parenthesis are needed on the right side and then dispatch
has_paren = False
right_class = str(t.right.__class__)
if (right_class in op_precedence.keys() and
op_precedence[right_class] < op_precedence[str(t.__class__)]):
has_paren = True
if has_paren:
self._write('(')
self._dispatch(t.right)
if has_paren:
self._write(')')
def _float(self, t):
# if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
# We prefer str here.
self._write(str(t))
def _str(self, t):
self._write(repr(t))
def _tuple(self, t):
    # Constant tuple value (not an AST Tuple node): write its str() form.
    self._write(str(t))
#########################################################################
# These are the methods from the _ast modules unparse.
#
# As our needs to handle more advanced code increase, we may want to
# modify some of the methods below so that they work for compiler.ast.
#########################################################################
# # stmt
# def _Expr(self, tree):
# self._fill()
# self._dispatch(tree.value)
#
# def _Import(self, t):
# self._fill("import ")
# first = True
# for a in t.names:
# if first:
# first = False
# else:
# self._write(", ")
# self._write(a.name)
# if a.asname:
# self._write(" as "+a.asname)
#
## def _ImportFrom(self, t):
## self._fill("from ")
## self._write(t.module)
## self._write(" import ")
## for i, a in enumerate(t.names):
## if i == 0:
## self._write(", ")
## self._write(a.name)
## if a.asname:
## self._write(" as "+a.asname)
## # XXX(jpe) what is level for?
##
#
# def _Break(self, t):
# self._fill("break")
#
# def _Continue(self, t):
# self._fill("continue")
#
# def _Delete(self, t):
# self._fill("del ")
# self._dispatch(t.targets)
#
# def _Assert(self, t):
# self._fill("assert ")
# self._dispatch(t.test)
# if t.msg:
# self._write(", ")
# self._dispatch(t.msg)
#
# def _Exec(self, t):
# self._fill("exec ")
# self._dispatch(t.body)
# if t.globals:
# self._write(" in ")
# self._dispatch(t.globals)
# if t.locals:
# self._write(", ")
# self._dispatch(t.locals)
#
# def _Print(self, t):
# self._fill("print ")
# do_comma = False
# if t.dest:
# self._write(">>")
# self._dispatch(t.dest)
# do_comma = True
# for e in t.values:
# if do_comma:self._write(", ")
# else:do_comma=True
# self._dispatch(e)
# if not t.nl:
# self._write(",")
#
# def _Global(self, t):
# self._fill("global")
# for i, n in enumerate(t.names):
# if i != 0:
# self._write(",")
# self._write(" " + n)
#
# def _Yield(self, t):
# self._fill("yield")
# if t.value:
# self._write(" (")
# self._dispatch(t.value)
# self._write(")")
#
# def _Raise(self, t):
# self._fill('raise ')
# if t.type:
# self._dispatch(t.type)
# if t.inst:
# self._write(", ")
# self._dispatch(t.inst)
# if t.tback:
# self._write(", ")
# self._dispatch(t.tback)
#
#
# def _TryFinally(self, t):
# self._fill("try")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# self._fill("finally")
# self._enter()
# self._dispatch(t.finalbody)
# self._leave()
#
# def _excepthandler(self, t):
# self._fill("except ")
# if t.type:
# self._dispatch(t.type)
# if t.name:
# self._write(", ")
# self._dispatch(t.name)
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _ClassDef(self, t):
# self._write("\n")
# self._fill("class "+t.name)
# if t.bases:
# self._write("(")
# for a in t.bases:
# self._dispatch(a)
# self._write(", ")
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _FunctionDef(self, t):
# self._write("\n")
# for deco in t.decorators:
# self._fill("@")
# self._dispatch(deco)
# self._fill("def "+t.name + "(")
# self._dispatch(t.args)
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _For(self, t):
# self._fill("for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# def _While(self, t):
# self._fill("while ")
# self._dispatch(t.test)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# # expr
# def _Str(self, tree):
# self._write(repr(tree.s))
##
# def _Repr(self, t):
# self._write("`")
# self._dispatch(t.value)
# self._write("`")
#
# def _Num(self, t):
# self._write(repr(t.n))
#
# def _ListComp(self, t):
# self._write("[")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write("]")
#
# def _GeneratorExp(self, t):
# self._write("(")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write(")")
#
# def _comprehension(self, t):
# self._write(" for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# for if_clause in t.ifs:
# self._write(" if ")
# self._dispatch(if_clause)
#
# def _IfExp(self, t):
# self._dispatch(t.body)
# self._write(" if ")
# self._dispatch(t.test)
# if t.orelse:
# self._write(" else ")
# self._dispatch(t.orelse)
#
# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
# def _UnaryOp(self, t):
# self._write(self.unop[t.op.__class__.__name__])
# self._write("(")
# self._dispatch(t.operand)
# self._write(")")
#
# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
# "FloorDiv":"//", "Pow": "**"}
# def _BinOp(self, t):
# self._write("(")
# self._dispatch(t.left)
# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
# self._dispatch(t.right)
# self._write(")")
#
# boolops = {_ast.And: 'and', _ast.Or: 'or'}
# def _BoolOp(self, t):
# self._write("(")
# self._dispatch(t.values[0])
# for v in t.values[1:]:
# self._write(" %s " % self.boolops[t.op.__class__])
# self._dispatch(v)
# self._write(")")
#
# def _Attribute(self,t):
# self._dispatch(t.value)
# self._write(".")
# self._write(t.attr)
#
## def _Call(self, t):
## self._dispatch(t.func)
## self._write("(")
## comma = False
## for e in t.args:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## for e in t.keywords:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## if t.starargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("*")
## self._dispatch(t.starargs)
## if t.kwargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("**")
## self._dispatch(t.kwargs)
## self._write(")")
#
# # slice
# def _Index(self, t):
# self._dispatch(t.value)
#
# def _ExtSlice(self, t):
# for i, d in enumerate(t.dims):
# if i != 0:
# self._write(': ')
# self._dispatch(d)
#
# # others
# def _arguments(self, t):
# first = True
# nonDef = len(t.args)-len(t.defaults)
# for a in t.args[0:nonDef]:
# if first:first = False
# else: self._write(", ")
# self._dispatch(a)
# for a,d in zip(t.args[nonDef:], t.defaults):
# if first:first = False
# else: self._write(", ")
# self._dispatch(a),
# self._write("=")
# self._dispatch(d)
# if t.vararg:
# if first:first = False
# else: self._write(", ")
# self._write("*"+t.vararg)
# if t.kwarg:
# if first:first = False
# else: self._write(", ")
# self._write("**"+t.kwarg)
#
## def _keyword(self, t):
## self._write(t.arg)
## self._write("=")
## self._dispatch(t.value)
#
# def _Lambda(self, t):
# self._write("lambda ")
# self._dispatch(t.args)
# self._write(": ")
# self._dispatch(t.body)
| Python |
import inspect
import os
import pydoc
import docscrape
from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc
import numpydoc
import comment_eater
class SphinxTraitsDoc(SphinxClassDoc):
    """SphinxClassDoc variant that adds a 'Traits' section to the parsed
    docstring sections, for documenting enthought.traits-based classes."""

    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
        # `cls` must be a class; its docstring is fetched, de-indented and
        # handed to the docscrape Reader before section parsing.
        if not inspect.isclass(cls):
            raise ValueError("Initialise using a class. Got %r" % cls)
        self._cls = cls
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename
        self._name = cls.__name__
        self._func_doc = func_doc
        docstring = pydoc.getdoc(cls)
        docstring = docstring.split('\n')
        # De-indent paragraph: strip the common leading indent of all
        # non-blank lines (min() raises ValueError on an all-blank body).
        try:
            indent = min(len(s) - len(s.lstrip()) for s in docstring
                         if s.strip())
        except ValueError:
            indent = 0
        for n,line in enumerate(docstring):
            docstring[n] = docstring[n][indent:]
        self._doc = docscrape.Reader(docstring)
        # Pre-seed every recognised section; note the extra 'Traits' and
        # 'Description' keys compared to the stock numpydoc sections.
        self._parsed_data = {
            'Signature': '',
            'Summary': '',
            'Description': [],
            'Extended Summary': [],
            'Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'Other Parameters': [],
            'Traits': [],
            'Methods': [],
            'See Also': [],
            'Notes': [],
            'References': '',
            'Example': '',
            'Examples': '',
            'index': {}
            }
        self._parse()

    def _str_summary(self):
        # Summary section followed by a blank separator line.
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        # Unlike the base class, also includes the 'Description' section.
        return self['Description'] + self['Extended Summary'] + ['']

    def __str__(self, indent=0, func_role="func"):
        """Render all parsed sections (including 'Traits') as reST lines,
        indented by `indent` spaces, joined into one string."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Traits', 'Methods',
                           'Returns','Raises'):
            out += self._str_param_list(param_list)
        out += self._str_see_also("obj")
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_section('Example')
        out += self._str_section('Examples')
        out = self._str_indent(out,indent)
        return '\n'.join(out)
def looks_like_issubclass(obj, classname):
    """ Return True if the object has a class or superclass with the given class
    name.

    Ignores old-style classes.

    Parameters
    ----------
    obj : class
        Class object to inspect.
    classname : str
        Name to look for on `obj` itself or anywhere in its MRO.

    Returns
    -------
    bool
    """
    t = obj
    if t.__name__ == classname:
        return True
    # Old-style (classic) classes have no __mro__ attribute; the original
    # `t.__mro__` raised AttributeError for them, contradicting the
    # documented "Ignores old-style classes".  getattr with a default
    # makes them fall through to False instead.
    for klass in getattr(t, '__mro__', ()):
        if klass.__name__ == classname:
            return True
    return False
def get_doc_object(obj, what=None):
    """Return the documentation wrapper appropriate for `obj`.

    Classes get a SphinxTraitsDoc (with trait comments harvested via
    comment_eater for HasTraits subclasses); functions/methods get a
    SphinxFunctionDoc; everything else a plain SphinxDocString.
    """
    if what is None:
        # Caller did not classify the object — infer the category.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what in ('function', 'method'):
        return numpydoc.SphinxFunctionDoc(obj, '')
    if what != 'class':
        return numpydoc.SphinxDocString(pydoc.getdoc(obj))
    doc = SphinxTraitsDoc(obj, '', func_doc=numpydoc.SphinxFunctionDoc)
    if looks_like_issubclass(obj, 'HasTraits'):
        for name, trait, comment in comment_eater.get_class_traits(obj):
            if name.startswith('_'):
                # Exclude private traits.
                continue
            doc['Traits'].append((name, trait, comment.splitlines()))
    return doc
def initialize(app):
    """Sphinx 'builder-inited' hook: wire up signature mangling and
    phantom-module import (traits-aware variant of numpydoc.initialize)."""
    try:
        app.connect('autodoc-process-signature', numpydoc.mangle_signature)
    except:
        # NOTE(review): bare except — presumably older Sphinx without this
        # event raises here; the monkeypatch is the fallback.  Confirm
        # which exception type is actually expected.
        numpydoc.monkeypatch_sphinx_ext_autodoc()
    # Monkeypatch numpydoc so it uses the traits-aware get_doc_object above.
    numpydoc.get_doc_object = get_doc_object
    fn = app.config.numpydoc_phantom_import_file
    if (fn and os.path.isfile(fn)):
        print "[numpydoc] Phantom importing modules from", fn, "..."
        numpydoc.import_phantom_module(fn)
def setup(app):
    """Sphinx extension entry point: register event handlers, config
    values, the autosummary directive and the autolink role."""
    app.connect('autodoc-process-docstring', numpydoc.mangle_docstrings)
    app.connect('builder-inited', initialize)
    app.add_config_value('numpydoc_phantom_import_file', None, True)
    app.add_config_value('numpydoc_edit_link', None, True)
    app.add_directive('autosummary', numpydoc.autosummary_directive, 1, (0, 0, False))
    app.add_role('autolink', numpydoc.autolink_role)
| Python |
import re, textwrap
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass whose __str__ renders the parsed sections
    as Sphinx-flavoured reST instead of plain text."""

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # Section header as a Sphinx rubric (`symbol` is unused here).
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        # Section name as a reST field-list marker.
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # Indent every line of `doc` by `indent` spaces.
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        # NOTE(review): the unconditional `return ['']` makes the code
        # below unreachable, so signatures are never emitted by __str__.
        # This looks like a deliberate switch-off (signatures are handled
        # by mangle_signature instead) — confirm before removing.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # Render a parameter-style section (name : type + indented body).
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param,param_type,desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc,8)
                out += ['']
        return out

    def _str_section(self, name):
        # Render a free-text section under a rubric, dedented as a block.
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        # Wrap the base-class See Also output in a Sphinx seealso box.
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out

    def _str_index(self):
        # Emit a Sphinx index directive built from the parsed index dict.
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default','')]
        # Python 2 iteritems(); each non-default key becomes an index entry.
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            # Normalize a bare string into a one-element list in place.
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
        return out

    def __str__(self, indent=0, func_role="func"):
        """Render all sections as reST lines indented by `indent` spaces."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Attributes', 'Methods',
                           'Returns','Raises'):
            out += self._str_param_list(param_list)
        out += self._str_see_also("obj")
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_section('Examples')
        out = self._str_indent(out,indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """FunctionDoc with Sphinx/reST rendering mixed in."""
    pass
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """ClassDoc with Sphinx/reST rendering mixed in."""
    pass
| Python |
import os, re, pydoc
from docscrape_sphinx import SphinxDocString, SphinxClassDoc, SphinxFunctionDoc
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    """Sphinx 'autodoc-process-docstring' hook: rewrite `lines` in place
    into numpydoc-rendered reST, and renumber [n]_ references so they stay
    unique across documents.

    The mutable default `reference_offset=[0]` is a deliberate
    persistent counter shared across calls — do not "fix" it.
    """
    if what == 'module':
        # Strip top title (over/underlined module heading) from module docs.
        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
                              re.I|re.S)
        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
    else:
        doc = get_doc_object(obj, what)
        lines[:] = str(doc).split("\n")
    # Optionally append an edit link built from the configured template.
    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
           obj.__name__:
        v = dict(full_name=obj.__name__)
        lines += [''] + (app.config.numpydoc_edit_link % v).split("\n")
    # replace reference numbers so that there are no duplicates
    references = []
    for l in lines:
        l = l.strip()
        if l.startswith('.. ['):
            try:
                references.append(int(l[len('.. ['):l.index(']')]))
            except ValueError:
                print "WARNING: invalid reference in %s docstring" % name
    # Start renaming from the biggest number, otherwise we may
    # overwrite references.
    references.sort()
    if references:
        for i, line in enumerate(lines):
            for r in references:
                new_r = reference_offset[0] + r
                lines[i] = lines[i].replace('[%d]_' % r,
                                            '[%d]_' % new_r)
                lines[i] = lines[i].replace('.. [%d]' % r,
                                            '.. [%d]' % new_r)
    reference_offset[0] += len(references)
def get_doc_object(obj, what=None):
    """Return the SphinxDocString-family wrapper appropriate for `obj`,
    inferring the category when `what` is not given."""
    if what is None:
        # Classify the object ourselves.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, '')
    return SphinxDocString(pydoc.getdoc(obj))
def mangle_signature(app, what, name, obj, options, sig, retann):
    """Sphinx 'autodoc-process-signature' hook: replace the autodoc
    signature with the one parsed from the object's docstring, if any.

    Returns a (signature, return_annotation) pair, or bare None (falling
    through to autodoc's default) for non-callable objects.
    """
    # Do not try to inspect classes that don't define `__init__`
    # (their inherited object.__init__ docstring contains this marker).
    if (inspect.isclass(obj) and
        'initializes x; see ' in pydoc.getdoc(obj.__init__)):
        return '', ''
    # NOTE(review): these two guards return None (not a tuple), unlike the
    # branches below — autodoc treats None as "keep default"; confirm.
    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
    if not hasattr(obj, '__doc__'): return
    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Drop everything before the opening parenthesis (the name part).
        sig = re.sub("^[^(]*", "", doc['Signature'])
    return sig, ''
def initialize(app):
    """Sphinx 'builder-inited' hook: connect signature mangling and load
    phantom modules from the configured XML file, if any."""
    try:
        app.connect('autodoc-process-signature', mangle_signature)
    except:
        # NOTE(review): bare except — presumably Sphinx < 0.5 without the
        # event; fall back to monkeypatching autodoc.  Confirm exception.
        monkeypatch_sphinx_ext_autodoc()
    fn = app.config.numpydoc_phantom_import_file
    if (fn and os.path.isfile(fn)):
        print "[numpydoc] Phantom importing modules from", fn, "..."
        import_phantom_module(fn)
def setup(app):
    """Sphinx extension entry point for numpydoc: register handlers,
    config values, the autosummary directive and the autolink role."""
    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('builder-inited', initialize)
    app.add_config_value('numpydoc_phantom_import_file', None, True)
    app.add_config_value('numpydoc_edit_link', None, True)
    app.add_directive('autosummary', autosummary_directive, 1, (0, 0, False))
    app.add_role('autolink', autolink_role)
#------------------------------------------------------------------------------
# .. autosummary::
#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from docutils import nodes
import sphinx.addnodes, sphinx.roles
from sphinx.util import patfilter
import posixpath
def autosummary_directive(dirname, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """
    Pretty table containing short signatures and summaries of functions etc.

    autosummary also generates a (hidden) toctree:: node.
    """
    # XXX: make the signatures and signature abbreviations optional
    names = []
    # Each non-blank content line is an object name to summarize.
    names += [x for x in content if x.strip()]
    result, warnings, titles = get_autosummary(names, state.document)
    # Parse the generated reST table into docutils nodes.
    node = nodes.paragraph()
    state.nested_parse(result, 0, node)
    env = state.document.settings.env
    suffix = env.config.source_suffix
    all_docnames = env.found_docs.copy()
    dirname = posixpath.dirname(env.docname)
    # Build the hidden toctree pointing at the generated/ stub pages.
    docnames = []
    doctitles = {}
    for name in titles.keys():
        docname = 'generated/' + name
        doctitles[docname] = ""
        doctitles[docname + '.xhtml'] = ""
        if docname.endswith(suffix):
            docname = docname[:-len(suffix)]
        docname = posixpath.normpath(posixpath.join(dirname, docname))
        if docname not in env.found_docs:
            warnings.append(state.document.reporter.warning(
                'toctree references unknown document %r' % docname,
                line=lineno))
        docnames.append(docname)
    tocnode = sphinx.addnodes.toctree()
    tocnode['includefiles'] = docnames
    tocnode['includetitles'] = doctitles
    tocnode['maxdepth'] = -1
    tocnode['glob'] = None
    return warnings + node.children + [tocnode]
def get_autosummary(names, document):
    """
    Generate a proper table node for autosummary:: directive.

    Parameters
    ----------
    names : list of str
        Names of Python objects to be imported and added to the table.
    document : document
        Docutils document object

    Returns
    -------
    (result, warnings, titles)
        result : ViewList of reST table lines
        warnings : list of docutils warning nodes
        titles : dict mapping resolved object name -> one-line summary
    """
    result = ViewList()
    warnings = []
    titles = {}
    # Try the current module as an import prefix before the bare name.
    prefixes = ['']
    prefixes.insert(0, document.settings.env.currmodule)
    rows = []
    for name in names:
        try:
            obj, real_name = import_by_name(name, prefixes=prefixes)
        except ImportError:
            warnings.append(document.reporter.warning(
                'failed to import %s' % name))
            rows.append((":obj:`%s`" % name, ""))
            continue
        doc = get_doc_object(obj)
        if doc['Summary']:
            titles[real_name] = " ".join(doc['Summary'])
        else:
            titles[real_name] = ""
        col1 = ":obj:`%s`" % name
        if doc['Signature']:
            # Strip the leading name part of the signature.
            sig = re.sub('^[a-zA-Z_0-9.-]*', '',
                         doc['Signature'].replace('*', r'\*'))
            if '=' in sig:
                # abbreviate optional arguments
                sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
                sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
                sig = re.sub(r'=[^,)]+,', ',', sig)
                sig = re.sub(r'=[^,)]+\)$', '])', sig)
                # shorten long strings
                sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig)
            else:
                sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig)
            col1 += " " + sig
        col2 = titles[real_name]
        rows.append((col1, col2))
    if not rows:
        return result, warnings, titles
    # Emit a two-column reST "simple table" around the collected rows.
    max_name_len = max([len(x[0]) for x in rows])
    row_fmt = "%%-%ds %%s" % max_name_len
    table_banner = ('='*max_name_len) + ' ' + '==============='
    result.append(table_banner, '<autosummary>')
    for row in rows:
        result.append(row_fmt % row, '<autosummary>')
    result.append(table_banner, '<autosummary>')
    result.append('', '<autosummary>')
    return result, warnings, titles
def import_by_name(name, prefixes=(None,)):
    """
    Import a Python object that has the given name, under one of the prefixes.

    Parameters
    ----------
    name : str
        Name of a Python object, eg. 'numpy.ndarray.view'
    prefixes : sequence of (str or None), optional
        Prefixes to prepend to the name (None implies no prefix).
        The first prefixed name that results to successful import is used.

    Returns
    -------
    obj
        The imported object
    name
        Name of the imported object (useful if `prefixes` was used)

    Raises
    ------
    ImportError
        If the name cannot be imported under any prefix.
    """
    # Default changed from the mutable list [None] to an equivalent
    # immutable tuple so the default can never be mutated across calls;
    # callers passing their own list are unaffected.
    for prefix in prefixes:
        if prefix:
            prefixed_name = '.'.join([prefix, name])
        else:
            prefixed_name = name
        try:
            return _import_by_name(prefixed_name), prefixed_name
        except ImportError:
            pass
    # Name the failing import instead of raising a bare ImportError.
    raise ImportError('no module named %s' % name)
def _import_by_name(name):
    """Import a Python object given its full dotted name."""
    try:
        parts = name.split('.')
        split_at = 0
        modname = None
        # Try progressively shorter prefixes of the dotted path until one
        # imports and lands in sys.modules; that prefix is the module part.
        for j in reversed(range(1, len(parts) + 1)):
            split_at = j
            modname = '.'.join(parts[:j])
            try:
                __import__(modname)
            except ImportError:
                continue
            if modname in sys.modules:
                break
        if split_at == len(parts):
            # The whole name was a module.
            return sys.modules[modname]
        # Walk the remaining path components as attributes of the module.
        obj = sys.modules[modname]
        for attr_name in parts[split_at:]:
            obj = getattr(obj, attr_name)
        return obj
    except (ValueError, ImportError, AttributeError, KeyError) as e:
        # Normalize any lookup failure into an ImportError for callers.
        raise ImportError(e)
#------------------------------------------------------------------------------
# :autolink: (smart default role)
#------------------------------------------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner,
                  options={}, content=[]):
    """
    Smart linking role.

    Expands to ":obj:`text`" if `text` is an object that can be imported;
    otherwise expands to "*text*".

    NOTE(review): the mutable defaults `options={}` / `content=[]` follow
    the docutils role-function convention; they are not mutated here.
    """
    r = sphinx.roles.xfileref_role('obj', rawtext, etext, lineno, inliner,
                                   options, content)
    pnode = r[0][0]
    prefixes = [None]
    #prefixes.insert(0, inliner.document.settings.env.currmodule)
    try:
        obj, name = import_by_name(pnode['reftarget'], prefixes)
    except ImportError:
        # Not importable: downgrade the cross-reference to plain emphasis.
        content = pnode[0]
        r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
                                 classes=content['classes'])
    return r
#------------------------------------------------------------------------------
# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
#------------------------------------------------------------------------------
def monkeypatch_sphinx_ext_autodoc():
    """Replace sphinx.ext.autodoc.format_signature with our wrapper, for
    Sphinx versions (< 0.5) lacking the autodoc-process-signature event.
    Idempotent: does nothing if already patched."""
    global _original_format_signature
    import sphinx.ext.autodoc
    if sphinx.ext.autodoc.format_signature is our_format_signature:
        return
    print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
    _original_format_signature = sphinx.ext.autodoc.format_signature
    sphinx.ext.autodoc.format_signature = our_format_signature
def our_format_signature(what, obj):
    """format_signature replacement: use the docstring-derived signature
    when mangle_signature produces one, else defer to the original."""
    r = mangle_signature(None, what, None, obj, None, None, None)
    if r is not None:
        return r[0]
    else:
        return _original_format_signature(what, obj)
#------------------------------------------------------------------------------
# Creating 'phantom' modules from an XML description
#------------------------------------------------------------------------------
import imp, sys, compiler, types
def import_phantom_module(xml_file):
    """
    Insert a fake Python module to sys.modules, based on a XML file.

    The XML file is expected to conform to Pydocweb DTD. The fake
    module will contain dummy objects, which guarantee the following:

    - Docstrings are correct.
    - Class inheritance relationships are correct (if present in XML).
    - Function argspec is *NOT* correct (even if present in XML).
      Instead, the function signature is prepended to the function docstring.
    - Class attributes are *NOT* correct; instead, they are dummy objects.

    Parameters
    ----------
    xml_file : str
        Name of an XML file to read

    """
    import lxml.etree as etree

    # Maps dotted name -> created phantom object.
    object_cache = {}

    tree = etree.parse(xml_file)
    root = tree.getroot()

    # Sort items so that
    # - Base classes come before classes inherited from them
    # - Modules come before their contents
    all_nodes = dict([(n.attrib['id'], n) for n in root])

    def _get_bases(node, recurse=False):
        # Collect base-class ids of `node`; with recurse=True the list is
        # extended in place while iterating, walking the whole ancestry.
        bases = [x.attrib['ref'] for x in node.findall('base')]
        if recurse:
            j = 0
            while True:
                try:
                    b = bases[j]
                except IndexError: break
                if b in all_nodes:
                    bases.extend(_get_bases(all_nodes[b]))
                j += 1
        return bases

    type_index = ['module', 'class', 'callable', 'object']

    def base_cmp(a, b):
        # Python 2 cmp-style ordering: modules first, then classes ordered
        # so that bases precede subclasses, then by dotted-name depth.
        x = cmp(type_index.index(a.tag), type_index.index(b.tag))
        if x != 0: return x

        if a.tag == 'class' and b.tag == 'class':
            a_bases = _get_bases(a, recurse=True)
            b_bases = _get_bases(b, recurse=True)
            x = cmp(len(a_bases), len(b_bases))
            if x != 0: return x
            if a.attrib['id'] in b_bases: return -1
            if b.attrib['id'] in a_bases: return 1

        return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))

    nodes = root.getchildren()
    nodes.sort(base_cmp)

    # Create phantom items
    for node in nodes:
        name = node.attrib['id']
        # Python 2 'string-escape' decoding of the stored docstring.
        doc = (node.text or '').decode('string-escape') + "\n"
        if doc == "\n": doc = ""

        # create parent, if missing
        parent = name
        while True:
            parent = '.'.join(parent.split('.')[:-1])
            if not parent: break
            if parent in object_cache: break
            obj = imp.new_module(parent)
            object_cache[parent] = obj
            sys.modules[parent] = obj

        # create object
        if node.tag == 'module':
            obj = imp.new_module(name)
            obj.__doc__ = doc
            sys.modules[name] = obj
        elif node.tag == 'class':
            bases = [object_cache[b] for b in _get_bases(node)
                     if b in object_cache]
            bases.append(object)
            init = lambda self: None
            init.__doc__ = doc
            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
            obj.__name__ = name.split('.')[-1]
        elif node.tag == 'callable':
            funcname = node.attrib['id'].split('.')[-1]
            argspec = node.attrib.get('argspec')
            if argspec:
                # Prepend the (unreliable) signature to the docstring.
                argspec = re.sub('^[^(]*', '', argspec)
                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
            obj = lambda: 0
            obj.__argspec_is_invalid_ = True
            obj.func_name = funcname
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__objclass__ = object_cache[parent]
        else:
            class Dummy(object): pass
            obj = Dummy()
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                # Make the dummy look like a descriptor on its class.
                obj.__get__ = lambda: None
        object_cache[name] = obj

        if parent:
            if inspect.ismodule(object_cache[parent]):
                obj.__module__ = parent
                setattr(object_cache[parent], name.split('.')[-1], obj)

    # Populate items
    for node in root:
        obj = object_cache.get(node.attrib['id'])
        if obj is None: continue
        for ref in node.findall('ref'):
            if node.tag == 'class':
                # Only attach members that actually belong to this class.
                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
                    setattr(obj, ref.attrib['name'],
                            object_cache.get(ref.attrib['ref']))
            else:
                setattr(obj, ref.attrib['name'],
                        object_cache.get(ref.attrib['ref']))
| Python |
def exact_solution(tf=0.00076, dt=1e-4):
    """Semi-analytic solution of the elliptical drop equations.

    Integrates the coupled ODEs for the drop parameters with explicit
    Euler steps of size `dt` until time exceeds `tf`, then evaluates the
    ellipse boundary at 101 equally spaced angles.

    Returns a tuple ``(x, y, po)``: boundary x coordinates, boundary y
    coordinates, and the central pressure value.
    """
    import numpy

    A0 = 100
    a0 = 1.0
    theta = numpy.linspace(0, 2*numpy.pi, 101)

    A_cur = A0
    a_cur = a0
    t = 0.0
    while t <= tf:
        t += dt
        A_old = A_cur
        a_old = a_cur
        # Explicit Euler update of the drop parameters.
        A_cur = A_old + dt*(A_old*A_old*(a_old**4 - 1))/(a_old**4 + 1)
        a_cur = a_old + dt*(-a_old * A_old)

    dadt = A_cur**2 * (a_cur**4 - 1)/(a_cur**4 + 1)
    po = 0.5*-a_cur**2 * (dadt - A_cur**2)

    return a_cur*numpy.cos(theta), 1/a_cur*numpy.sin(theta), po
#############################################################################
| Python |
""" An example solving stress test case """
import sys
import numpy
from numpy import pi, sin, sinh, cos, cosh
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver, get_particle_array
from pysph.sph.funcs import stress_funcs
from pysph.sph.api import SPHFunction
# Create the PySPH application driver for this 3D stress test script.
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
CFL = 0.1  # Courant number passed to the stress solver below
dim = 3    # spatial dimension; also used for particle mass dx**dim
#tf = app.options.final_time if app.options.final_time else 1e-2
class PrintPos(object):
    """Solver callback that samples properties of one particle each step
    and appends them to a tab-separated file every `write_interval` steps."""

    def __init__(self, particle_id, props=['x'], filename='stress.dat', write_interval=100):
        # NOTE(review): opens the output file for the object's lifetime;
        # it is never explicitly closed.
        self.file = open(filename, 'w')
        self.file.write('i\t'+'\t'.join(props)+'\n')
        self.res = []           # buffered rows not yet flushed to disk
        self.props = props
        self.particle_id = particle_id
        self.write_interval = write_interval

    def function(self, solver):
        # Called by the solver each step: record (step, prop values...)
        # for the tracked particle from the first particle array.
        l = [solver.count]
        for prop in self.props:
            l.append(getattr(solver.particles.arrays[0], prop)[self.particle_id])
        self.res.append(l)
        if solver.count%self.write_interval == 0:
            # Flush the buffered rows and reset the buffer.
            s = '\n'.join('\t'.join(map(str,line)) for line in self.res)
            self.file.write(s)
            self.file.write('\n')
            self.res = []
def create_particles():
    """Build the particle arrays for the 3D oscillating-rod stress test.

    Returns [pa, pb]: `pa` is the cylindrical rod of solid particles with
    an initial transverse velocity profile, `pb` the fixed boundary
    particles clamped at the left end (x < dx/2).
    """
    #x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
    dx = 0.002 # 2mm
    R = 0.02        # rod diameter scale used for the cylindrical cut below
    xl = -0.05      # left end of the lattice (clamped region)
    L = 0.2         # rod length
    # Cartesian lattice, then keep only points inside the cylinder r < R/2.
    x,y,z = numpy.mgrid[xl:L+dx/2:dx, -R/2:(R+dx)/2:dx, -R/2:(R+dx)/2:dx]
    x = x.ravel()
    y = y.ravel()
    z = z.ravel()
    r2 = y**2+z**2
    keep = numpy.flatnonzero(r2<R*R/4)
    x = x[keep]
    y = y[keep]
    z = z[keep]
    # Split into boundary (x < dx/2) and free rod particles.
    bdry = (x<dx/2)*1.0
    bdry_indices = numpy.flatnonzero(bdry)
    rod_indices = numpy.flatnonzero(1-bdry)
    x2 = x[bdry_indices]
    y2 = y[bdry_indices]
    z2 = z[bdry_indices]
    x = x[rod_indices]
    y = y[rod_indices]
    z = z[rod_indices]
    print 'num_particles:', len(x), 'num_bdry_particles:', len(x2)
    #print bdry, numpy.flatnonzero(bdry)
    # Per-particle mass, smoothing length and density for both groups.
    m = numpy.ones_like(x)*dx**dim
    m2 = numpy.ones_like(x2)*dx**dim
    h = numpy.ones_like(x)*1.5*dx
    h2 = numpy.ones_like(x2)*1.5*dx
    rho = numpy.ones_like(x)
    rho2 = numpy.ones_like(x2)
    p = u = x*0
    # Initial transverse (v,w) velocity: sinusoidal along x, rotational
    # about the rod axis, normalized to vel_max at the outer radius.
    vel_max = 1
    v = z*vel_max/max(z)*sin(pi*x/2/L)
    w = -y*vel_max/max(y)*sin(pi*x/2/L)
    p2 = u2 = v2 = w2 = x2*0
    pa = get_particle_array(x=x, y=y, z=z, m=m, rho=rho, h=h, p=p, u=u, v=v, w=w,
                            name='solid',
                            )
    # Material constants: Young's modulus, Poisson ratio, shear and bulk
    # moduli, reference density/spacing and the derived sound speed.
    pa.constants['E'] = 1e9
    pa.constants['nu'] = 0.25
    pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
    pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
    pa.constants['rho0'] = 1.0
    pa.constants['dr0'] = dx
    pa.constants['c_s'] = (pa.constants['K']/pa.constants['rho0'])**0.5
    pa.cs = numpy.ones_like(x) * pa.constants['c_s']
    print 'c_s:', pa.c_s
    print 'G:', pa.G/pa.c_s**2/pa.rho0
    print 'v_f:', pa.v[-1]/pa.c_s, '(%s)'%pa.v[-1]
    print 'T:', 2*numpy.pi/(pa.E*0.02**2*(1.875/0.2)**4/(12*pa.rho0*(1-pa.nu**2)))**0.5
    pa.set(idx=numpy.arange(len(pa.x)))
    print 'Number of particles: ', len(pa.x)
    #print 'CFL:', pa.c_s*dt/dx/2
    #print 'particle_motion:', -pa.u[-1]*dt
    # boundary particle array
    pb = get_particle_array(x=x2, x0=x2, y=y2, y0=y2, z=z2, z0=z2,
                            m=m2, rho=rho2,
                            h=h2, p=p2,
                            name='bdry', type=1,
                            )
    # Softer material for the boundary (E 1e7 vs 1e9 for the rod —
    # presumably intentional; confirm).
    pb.constants['E'] = 1e7
    pb.constants['nu'] = 0.25
    pb.constants['G'] = pb.constants['E']/(2.0*(1+pb.constants['nu']))
    pb.constants['K'] = stress_funcs.get_K(pb.constants['G'], pb.constants['nu'])
    pb.constants['rho0'] = 1.0
    pb.constants['dr0'] = dx
    pb.constants['c_s'] = (pb.constants['K']/pb.constants['rho0'])**0.5
    pb.cs = numpy.ones_like(x2) * pb.constants['c_s']
    return [pa, pb]
class FixedBoundary(SPHFunction):
    """SPHFunction that pins destination-array properties to fixed values
    (or to another property's values) each step, clamping boundary
    particles in place."""

    def __init__(self, source, dest, props=['x','y','z'],
                 values=[0,0,0], setup_arrays=True):
        # Copy the mutable default lists so instances never share them.
        self.props = props[:]
        self.values = values[:]
        SPHFunction.__init__(self, source, dest, setup_arrays)

    def set_src_dst_reads(self):
        # Reads: the pinned props plus any string values, which name
        # source properties to copy from (e.g. 'x0', 'rho0').
        self.src_reads = self.dst_reads = self.props + [i for i in self.values if isinstance(i,str)]

    def eval(self, solver):
        # Overwrite each property array in place: a string value copies
        # from the named dest attribute, a number fills the whole array.
        for i,prop in enumerate(self.props):
            p = self.dest.get_carray(prop)
            p = p.get_npy_array()
            v = self.values[i]
            if isinstance(v, str):
                p[:] = getattr(self.dest, v)
            else:
                p[:] = v
# use the solvers default cubic spline kernel
# s = StressSolver(dim=2, integrator_type=solver.RK2Integrator)
# FIXME: LeapFrog Integrator does not work
s = StressSolver(dim=3, integrator_type=solver.EulerIntegrator, xsph=0.5,
                 marts_eps=0.3, marts_n=4, CFL=CFL)

# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100  # output/print frequency in steps

app.setup(s, create_particles=create_particles)
particles = s.particles
pa, pb = particles.arrays
# Clamp the boundary particles: restore positions from (x0,y0,z0), zero
# their velocities and reset density from rho0 before every step.
s.pre_step_functions.append(FixedBoundary(pb, pb, props=['x','y','z','u','v','w','rho'],
                                          values=['x0','y0','z0',0,0,0,'rho0']))
app.run()
| Python |
""" An example solving stress test case """
import sys
import numpy
from numpy import pi, sin, sinh, cos, cosh
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver
from pysph.sph.funcs import stress_funcs
from pysph.sph.api import SPHFunction
# Create the PySPH application driver for this 2D stress test script.
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
CFL = 0.1  # Courant number for the solver's time stepping
#tf = app.options.final_time if app.options.final_time else 1e-2
class PrintPos(object):
    """Solver callback that samples properties of one particle each step
    and appends them to a tab-separated file every `write_interval` steps.
    (Duplicate of the PrintPos helper in the 3D stress script.)"""

    def __init__(self, particle_id, props=['x'], filename='stress.dat', write_interval=100):
        # NOTE(review): opens the output file for the object's lifetime;
        # it is never explicitly closed.
        self.file = open(filename, 'w')
        self.file.write('i\t'+'\t'.join(props)+'\n')
        self.res = []           # buffered rows not yet flushed to disk
        self.props = props
        self.particle_id = particle_id
        self.write_interval = write_interval

    def function(self, solver):
        # Record (step, prop values...) for the tracked particle from the
        # first particle array; flush the buffer periodically.
        l = [solver.count]
        for prop in self.props:
            l.append(getattr(solver.particles.arrays[0], prop)[self.particle_id])
        self.res.append(l)
        if solver.count%self.write_interval == 0:
            s = '\n'.join('\t'.join(map(str,line)) for line in self.res)
            self.file.write(s)
            self.file.write('\n')
            self.res = []
def create_particles():
    """Create the particle arrays for the 2D oscillating-plate test.

    Returns [pa, pb]: pa is the elastic plate ('solid') and pb a set of
    fixed boundary particles ('bdry') above and below the clamped end.
    """
    #x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
    dx = 0.002 # 2mm
    xl = -0.05          # leftmost extent; x < 0 is the clamped region
    L = 0.2             # plate length
    H = 0.02            # plate height
    x,y = numpy.mgrid[xl:L+dx/2:dx, -H/2:(H+dx)/2:dx]
    x = x.ravel()
    y = y.ravel()
    bdry = (x<dx/2)*1.0             # 1.0 flags clamped particles
    bdry_indices = numpy.flatnonzero(bdry)
    print 'num_particles', len(x)
    #print bdry, numpy.flatnonzero(bdry)
    m = numpy.ones_like(x)*dx*dx
    h = numpy.ones_like(x)*1.5*dx
    rho = numpy.ones_like(x)
    z = numpy.zeros_like(x)
    p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
    cs = numpy.ones_like(x) * 10000.0
    u = -x
    u *= 0.0   # x-velocity zeroed; only the transverse motion is excited
    #v = numpy.ones_like(x)*1e-2
    #v = numpy.sin(x*pi/2.0/5.0)*2.17e3
    #v = numpy.sin(x*pi/2.0/5.0)*1e-1

    # set the v: transverse velocity profile (presumably the first
    # cantilever mode -- kL = 1.875 is its characteristic root); the
    # amplitude is scaled by the sound speed after pa is built.
    kL = 1.875
    k = kL/L
    M = sin(kL)+sinh(kL)
    N = cos(kL) + cosh(kL)
    Q = 2*(cos(kL)*sinh(kL) - sin(kL)*cosh(kL))
    v_f = 0.01          # velocity amplitude as a fraction of c_s
    kx = k*x
    # sill need to multiply by c_s
    v = v_f*(M*(cos(kx)-cosh(kx)) - N*(sin(kx)-sinh(kx)))/Q
    v[bdry_indices] = 0             # clamped particles stay at rest
    p *= 0
    h *= 1
    #u = 0.1*numpy.sin(x*pi/2.0/5.0)
    #u[numpy.flatnonzero(x<0.01)] = 0

    pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v, z=z,w=z,
                                 ubar=z, vbar=z, wbar=z,
                                 name='solid', type=1,
                                 sigma00=z, sigma11=z, sigma22=z,
                                 sigma01=z, sigma12=z, sigma02=z,
                                 MArtStress00=z, MArtStress11=z, MArtStress22=z,
                                 MArtStress01=z, MArtStress12=z, MArtStress02=z,
                                 bdry=bdry
                                 )

    # material constants and the derived shear/bulk moduli and sound speed
    pa.constants['E'] = 1e9
    pa.constants['nu'] = 0.25
    pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
    pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
    pa.constants['rho0'] = 1.0
    pa.constants['dr0'] = dx
    pa.constants['c_s'] = (pa.constants['K']/pa.constants['rho0'])**0.5
    pa.cs = numpy.ones_like(x) * pa.constants['c_s']
    print 'c_s:', pa.c_s
    print 'G:', pa.G/pa.c_s**2/pa.rho0
    pa.v *= pa.c_s      # scale the mode profile to a physical velocity
    print 'v_f:', pa.v[-1]/pa.c_s, '(%s)'%pa.v[-1]
    print 'T:', 2*numpy.pi/(pa.E*0.02**2*(1.875/0.2)**4/(12*pa.rho0*(1-pa.nu**2)))**0.5
    pa.set(idx=numpy.arange(len(pa.x)))
    print 'Number of particles: ', len(pa.x)
    #print 'CFL:', pa.c_s*dt/dx/2
    #print 'particle_motion:', -pa.u[-1]*dt

    # boundary particle array: rows of particles just above the plate near
    # the clamped end, mirrored about y = 0
    x, y = numpy.mgrid[xl:dx/2:dx, H/2+dx:H/2+3.5*dx:dx]
    x = x.ravel()
    y = y.ravel()
    x2, y2 = x, -y
    x = numpy.concatenate([x,x2])
    y = numpy.concatenate([y,y2])
    z = numpy.zeros_like(x)
    rho = numpy.ones_like(x)
    m = rho*dx*dx
    h = 1.5*dx*rho
    # x0/y0 store the rest positions used by FixedBoundary to re-pin them
    pb = base.get_particle_array(x=x, x0=x, y=y, y0=y, m=m, rho=rho,
                                 h=h, p=z, u=z, v=z, z=z,w=z,
                                 ubar=z, vbar=z, wbar=z,
                                 name='bdry', type=1,
                                 sigma00=z, sigma11=z, sigma22=z,
                                 sigma01=z, sigma12=z, sigma02=z,
                                 MArtStress00=z, MArtStress11=z, MArtStress22=z,
                                 MArtStress01=z, MArtStress12=z, MArtStress02=z,
                                 )
    pb.constants['E'] = 1e9
    pb.constants['nu'] = 0.25
    pb.constants['G'] = pb.constants['E']/(2.0*(1+pb.constants['nu']))
    pb.constants['K'] = stress_funcs.get_K(pb.constants['G'], pb.constants['nu'])
    pb.constants['rho0'] = 1.0
    pb.constants['dr0'] = dx
    pb.constants['c_s'] = (pb.constants['K']/pb.constants['rho0'])**0.5
    pb.cs = numpy.ones_like(x) * pb.constants['c_s']
    return [pa, pb]
class FixedBoundary(SPHFunction):
    """Pre-step function that overwrites selected destination properties.

    Each entry of ``props`` is rewritten every step from the matching
    entry of ``values``: a string value names another array property to
    copy from (e.g. 'x0'); anything else is broadcast as a scalar.
    """

    def __init__(self, source, dest, props=['x','y','z'],
                 values=[0,0,0], setup_arrays=True):
        # keep private copies so later caller-side mutation has no effect
        self.props = list(props)
        self.values = list(values)
        SPHFunction.__init__(self, source, dest, setup_arrays)

    def set_src_dst_reads(self):
        named = [v for v in self.values if isinstance(v, str)]
        self.src_reads = self.dst_reads = self.props + named

    def eval(self, solver):
        for prop, value in zip(self.props, self.values):
            target = self.dest.get_carray(prop).get_npy_array()
            if isinstance(value, str):
                # copy from the named companion property
                target[:] = getattr(self.dest, value)
            else:
                target[:] = value
# use the solvers default cubic spline kernel
# s = StressSolver(dim=2, integrator_type=solver.RK2Integrator)
s = StressSolver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator,
                 xsph=0.5, marts_eps=0.3, marts_n=4, CFL=CFL)

# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100

app.setup(s, create_particles=create_particles)

particles = s.particles
pa, pb = particles.arrays

# clamp the boundary array each step: positions restored from (x0, y0),
# velocities zeroed, density reset to rho0
s.pre_step_functions.append(FixedBoundary(pb, pb, props=['x','y','u','v','rho'],
                                          values=['x0','y0',0,0,'rho0']))

app.run()
| Python |
""" Balls colliding in 2D """
import numpy
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
import pysph.sph.funcs.stress_funcs as stress_funcs
app = solver.Application()
Solid = base.ParticleType.Solid
E = 1e7
nu = 0.3975
G = E/(2.0*(1+nu))
K = sph.get_K(G, nu)
ro = 1.0
co = numpy.sqrt(K/ro)
deltap = 0.001
fac=1e-10
print "co, ro, G = ", co, ro, G
def create_particles(two_arr=False):
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
dx = 0.001 # 1mm
ri = 0.03 # 3cm inner radius
ro = 0.04 # 4cm outer radius
spacing = 0.041 # spacing = 2*5cm
x,y = numpy.mgrid[-ro:ro:dx, -ro:ro:dx]
x = x.ravel()
y = y.ravel()
d = (x*x+y*y)
keep = numpy.flatnonzero((ri*ri<=d) * (d<ro*ro))
x = x[keep]
y = y[keep]
print 'num_particles', len(x)*2
if not two_arr:
x = numpy.concatenate([x-spacing,x+spacing])
y = numpy.concatenate([y,y])
#print bdry, numpy.flatnonzero(bdry)
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*1.4*dx
rho = numpy.ones_like(x)
z = numpy.zeros_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 10000.0
# u is set later
v = z
u_f = 0.059
p *= 0
h *= 1
pa = base.get_particle_array(cl_precision="single",
name="ball", type=Solid,
x=x+spacing, y=y,
m=m, rho=rho, h=h,
p=p, cs=cs,
u=z, v=v)
pa.cs[:] = co
pa.u = pa.cs*u_f*(2*(x<0)-1)
pa.constants['dr0'] = dx
pa.constants["rho0"] = ro
return pa
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)

# Add the operations.  SPHOperation entries are evaluated every step;
# SPHIntegration entries contribute rates accumulated by the integrator.

# Velocity Gradient tensor
s.add_operation(solver.SPHOperation(
    sph.VelocityGradient2D.withargs(), on_types=[Solid,],
    id="vgrad")
)

# Equation of state
s.add_operation(solver.SPHOperation(
    sph.IsothermalEquation.withargs(ro=ro, co=co), on_types=[Solid,],
    id="eos", updates=['p'])
)

# Artificial stress
s.add_operation(solver.SPHOperation(
    sph.MonaghanArtificialStress.withargs(eps=0.3),
    on_types=[Solid,],
    id="art_stress",)
)

# density rate
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(), on_types=[Solid,], from_types=[Solid],
    id="density", updates=['rho'])
)

# momentum equation artificial viscosity
s.add_operation(solver.SPHIntegration(
    sph.MonaghanArtificialViscosity.withargs(alpha=1.0, beta=1.0),
    on_types=[Solid,], from_types=[Solid,],
    id="avisc", updates=['u','v'])
)

# momentum equation
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquationWithStress2D.withargs(deltap=deltap, n=4),
    on_types=[Solid,],
    from_types=[Solid,], id="momentum", updates=['u','v'])
)

# s.add_operation(solver.SPHIntegration(
#     sph.MonaghanArtStressAcc.withargs(n=4, deltap=deltap, rho0=ro,
#                                       R="R_"),
#     from_types=[Solid], on_types=[Solid],
#     updates=['u','v'],
#     id='mart_stressacc')
# )

# XSPH
s.add_operation(solver.SPHIntegration(
    sph.XSPHCorrection.withargs(eps=0.5),
    on_types=[Solid,], from_types=[Solid,],
    id="xsph", updates=['u','v'])
)

# stress rate
s.add_operation(solver.SPHIntegration(
    sph.HookesDeviatoricStressRate2D.withargs(shear_mod=G),
    on_types=[Solid,],
    id="stressrate")
)

# position stepping
s.add_operation(solver.SPHIntegration(
    sph.PositionStepping.withargs(),
    on_types=[Solid,],
    id="step", updates=['x','y'])
)

app.setup(s, create_particles=create_particles)

# time stepping parameters (can be overridden on the command line)
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100

app.run()
###############################################################################
# DEBUG: a second solver (s1) with a slightly different operation set, used
# by check() below to compare particle states against s step by step.
s1 = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)

# Velocity Gradient tensor
s1.add_operation(solver.SPHOperation(
    sph.VelocityGradient2D.withargs(), on_types=[Solid,],
    id="vgrad")
)

# Equation of state
s1.add_operation(solver.SPHOperation(
    sph.IsothermalEquation.withargs(ro=ro, co=co), on_types=[Solid,],
    id="eos", updates=['p'])
)

# density rate
s1.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(), on_types=[Solid,], from_types=[Solid],
    id="density", updates=['rho'])
)

# s1.add_operation(solver.SPHOperation(
#     stress_funcs.MonaghanArtStressD.withargs(eps=0.3, stress="S_"),
#     on_types=[Solid],
#     updates=['MArtStress00','MArtStress11','MArtStress22'],
#     id='mart_stress_d')
# )
# s1.add_operation(solver.SPHOperation(
#     stress_funcs.MonaghanArtStressS.withargs(eps=0.3, stress="S_"),
#     on_types=[Solid],
#     updates=['MArtStress12','MArtStress02','MArtStress01'],
#     id='mart_stress_s')
# )
# s1.add_operation(solver.SPHIntegration(
#     stress_funcs.MonaghanArtStressAcc.withargs(n=4),
#     from_types=[Solid], on_types=[Solid],
#     updates=['u','v','w'],
#     id='mart_stressacc')
# )

# momentum equation
s1.add_operation(solver.SPHIntegration(
    sph.MomentumEquationWithStress2D.withargs(theta_factor=fac,
                                              deltap=deltap, n=4,
                                              epsp=0.3, epsm=0),
    on_types=[Solid,],
    from_types=[Solid,], id="momentum", updates=['u','v'])
)

# s1.add_operation(solver.SPHIntegration(
#     stress_funcs.SimpleStressAcceleration.withargs(stress="S_"),
#     from_types=[Solid], on_types=[Solid],
#     updates=['u','v','w'],
#     id='stressacc')
# )

# momentum equation artificial viscosity
# (fixed typo: this used sph.MonaghanArtificialVsicosity, which would raise
# an AttributeError; the correctly spelled name is used for solver s above)
s1.add_operation(solver.SPHIntegration(
    sph.MonaghanArtificialViscosity.withargs(alpha=1.0, beta=1.0, eta=0.0),
    on_types=[Solid,], from_types=[Solid,],
    id="avisc", updates=['u','v'])
)

# stress rate
s1.add_operation(solver.SPHIntegration(
    sph.HookesDeviatoricStressRate2D.withargs(shear_mod=G),
    on_types=[Solid,],
    id="stressrate")
)

# position stepping
s1.add_operation(solver.SPHIntegration(
    sph.PositionStepping.withargs(),
    on_types=[Solid,],
    id="step", updates=['x','y','z'])
)
# Each solver needs its own Application for setup.  app1/app2 were
# previously referenced here without ever being defined, making this whole
# debug section a NameError; 'app' itself was already consumed by the
# first setup/run above.
app1 = solver.Application()
app2 = solver.Application()

dt = 1e-8
tf = 1e-2
s1.set_time_step(dt)
s1.set_final_time(tf)
s1.set_kernel_correction(-1)
s1.pfreq = 100
app1.setup(s1, create_particles=create_particles)
#app.run()

# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100
app2.setup(s, create_particles=create_particles)

#print [calc.id for calc in s.integrator.calcs]
#print [calc.id for calc in s1.integrator.calcs]
# particles = s.particles
# pa = particles.arrays[0]
def check():
array1 = s.particles.arrays[0]
array2 = s1.particles.arrays[0]
props = ['x','y','u','v','rho','p']
np = array1.get_number_of_particles()
nk = array2.get_number_of_particles()
assert np == nk
for prop in props:
p = array1.get(prop)
k = array2.get(prop)
err = abs(p - k)
print prop, sum(err)/nk, max(err)
t = 0.0
while t < tf:
print "Checkking at %g ", t
check()
print
t += dt
s.set_final_time(t)
s1.set_final_time(t)
s.solve(dt)
s1.solve(dt)
| Python |
""" An example solving stress test case """
import numpy
import sys
import pysph.base.api as base
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver, get_particle_array
from pysph.sph.funcs import stress_funcs, arithmetic_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
#tf = app.options.final_time if app.options.final_time else 1e-2
class PrintPos(object):
    """Buffer chosen properties of a single particle and dump them to a
    tab-separated file (gnuplot / numpy.loadtxt friendly) every
    ``write_interval`` solver iterations."""

    def __init__(self, particle_id, props=['x'], filename='stress.dat', write_interval=100):
        self.file = open(filename, 'w')
        # header row: iteration counter then the tracked property names
        self.file.write('i\t'+'\t'.join(props)+'\n')
        self.res = []
        self.props = props
        self.particle_id = particle_id
        self.write_interval = write_interval

    def function(self, solver):
        """Record one row; flush the buffer on every interval-th call."""
        arr = solver.particles.arrays[0]
        row = [solver.count]
        row.extend(getattr(arr, name)[self.particle_id] for name in self.props)
        self.res.append(row)
        if solver.count % self.write_interval == 0:
            self.file.write('\n'.join('\t'.join(map(str, r)) for r in self.res))
            self.file.write('\n')
            self.res = []
def create_particles():
    """Create a 2D bar of elastic particles; particles with x < 0.01 are
    flagged (bdry = 1.0) so FixedBoundary can clamp them."""
    #N = 21
    dx = 0.1
    #x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
    x,y = numpy.mgrid[-0.2:5.01:dx, -0.2:0.21:dx]
    x = x.ravel()
    y = y.ravel()
    bdry = (x<0.01)*1.0
    print 'num_particles', len(x)
    print bdry, numpy.flatnonzero(bdry)
    m = numpy.ones_like(x)*dx*dx
    h = numpy.ones_like(x)*1.4*dx
    rho = numpy.ones_like(x)
    z = numpy.zeros_like(x)
    p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
    cs = numpy.ones_like(x) * 10000.0
    u = -x      # initial velocity directed towards x = 0
    u *= 1e0
    h *= 1
    v = 0.0*y
    p *= 0.0    # pressure zeroed; the expression above ends up unused
    pa = get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v, z=z,w=z,
                            bdry=bdry)
    # material constants and derived moduli / sound speed
    pa.constants['E'] = 1e9
    pa.constants['nu'] = 0.3
    pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
    pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
    pa.constants['rho0'] = 1.
    pa.constants['dr0'] = dx
    pa.constants['c_s'] = numpy.sqrt(pa.constants['K']/pa.constants['rho0'])
    pa.cs = numpy.ones_like(x) * pa.constants['c_s']
    pa.set(idx=numpy.arange(len(pa.x)))
    print 'G_mu', pa.G/pa.K
    print 'Number of particles: ', len(pa.x)
    return pa
class FixedBoundary(SPHFunction):
    """Pre-step function that pins properties of selected particles.

    For each (prop, value) pair, ``dest``'s property array is overwritten
    at ``particle_indices`` with the scalar value, holding those particles
    fixed across integration steps.
    """
    def __init__(self, source, dest, particle_indices, props=['x','y','z'],
                 values=[0,0,0], setup_arrays=True):
        self.indices = particle_indices
        # copy the lists so mutation by the caller (or of the shared
        # mutable defaults) cannot change this instance's behaviour; the
        # sibling FixedBoundary implementation does the same
        self.props = props[:]
        self.values = values[:]
        SPHFunction.__init__(self, source, dest, setup_arrays)

    def set_src_dst_reads(self):
        self.src_reads = self.dst_reads = self.props

    def eval(self, solver):
        # overwrite each pinned property at the fixed indices
        for i,prop in enumerate(self.props):
            p = self.dest.get(prop)
            p[self.indices] = self.values[i]
CFL=None

# use the solvers default cubic spline kernel
s = StressSolver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator,
                 xsph=0.5, marts_eps=0.3, marts_n=4, CFL=CFL)

dt = 1e-8
tf = 1e-3
s.set_time_step(dt)
s.set_final_time(tf)
s.pfreq = 100

app.setup(s, create_particles=create_particles)
particles = s.particles
pa = particles.arrays[0]

# clamp before each step: u zeroed for the flagged boundary particles,
# v zeroed for every particle
s.pre_step_functions.append(FixedBoundary(pa, pa, props=['u'], values=[0],
                            particle_indices=numpy.flatnonzero(pa.bdry)))
s.pre_step_functions.append(FixedBoundary(pa, pa, props=['v'], values=[0],
                            particle_indices=range(len(pa.x))))
s.set_kernel_correction(-1)

app.run()
| Python |
""" An example solving stress test case : colliding rubber balls """
import sys
import numpy
from numpy import pi, sin, sinh, cos, cosh
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver
from pysph.sph.funcs import stress_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
#tf = app.options.final_time if app.options.final_time else 1e-2
def create_particles(two_arr=False):
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
dx = 0.001 # 1mm
ri = 0.03 # 3cm inner radius
ro = 0.04 # 4cm outer radius
spacing = 0.041 # spacing = 2*5cm
x,y = numpy.mgrid[-ro:ro:dx, -ro:ro:dx]
x = x.ravel()
y = y.ravel()
d = (x*x+y*y)
keep = numpy.flatnonzero((ri*ri<=d) * (d<ro*ro))
x = x[keep]
y = y[keep]
print 'num_particles', len(x)*2
if not two_arr:
x = numpy.concatenate([x-spacing,x+spacing])
y = numpy.concatenate([y,y])
#print bdry, numpy.flatnonzero(bdry)
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*1.4*dx
rho = numpy.ones_like(x)
z = numpy.zeros_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 10000.0
# u is set later
v = z
u_f = 0.059
p *= 0
h *= 1
#u = 0.1*numpy.sin(x*pi/2.0/5.0)
#u[numpy.flatnonzero(x<0.01)] = 0
pa = base.get_particle_array(x=x+spacing, y=y, m=m, rho=rho, h=h, p=p, u=z, v=v, z=z,w=z,
ubar=z, vbar=z, wbar=z,
name='right_ball', type=1,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z,
MArtStress00=z, MArtStress11=z, MArtStress22=z,
MArtStress01=z, MArtStress12=z, MArtStress02=z,
#bdry=bdry
)
pa.constants['E'] = 1e7
pa.constants['nu'] = 0.3975
pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = 1.0
pa.constants['dr0'] = dx
pa.constants['c_s'] = (pa.constants['K']/pa.constants['rho0'])**0.5
pa.cs = numpy.ones_like(x) * pa.constants['c_s']
print 'c_s:', pa.c_s
print 'G:', pa.G/pa.c_s**2/pa.rho0
pa.u = pa.c_s*u_f*(2*(x<0)-1)
print 'u_f:', pa.u[0]/pa.c_s, '(%s)'%pa.u[0]
pa.set(idx=numpy.arange(len(pa.x)))
print 'Number of particles: ', len(pa.x)
print 'CFL:', pa.c_s*dt/dx/2
print 'particle_motion:', abs(pa.u[0]*dt)
if two_arr:
pb = base.get_particle_array(x=x-spacing, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v, z=z,w=z,
ubar=z, vbar=z, wbar=z,
name='left_ball', type=1,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z,
MArtStress00=z, MArtStress11=z, MArtStress22=z,
MArtStress01=z, MArtStress12=z, MArtStress02=z,
#bdry=bdry
)
pb.constants['E'] = 1e7
pb.constants['nu'] = 0.3975
pb.constants['G'] = pb.constants['E']/(2.0*1+pb.constants['nu'])
pb.constants['K'] = stress_funcs.get_K(pb.constants['G'], pb.constants['nu'])
pb.constants['rho0'] = 1.0
pb.constants['c_s'] = (pb.constants['K']/pb.constants['rho0'])**0.5
pb.cs = numpy.ones_like(x) * pb.constants['c_s']
print 'c_s:', pb.c_s
print 'G:', pb.G/pb.c_s**2/pb.rho0
print 'G_mu', pa.G/pa.K
pa.u = pa.c_s*u_f*(2*(x<0)-1)
print 'u_f:', pb.u[-1]/pb.c_s, '(%s)'%pb.u[-1]
pb.set(idx=numpy.arange(len(pb.x)))
print 'Number of particles: ', len(pb.x)
return [pa, pb]
else:
return pa
cfl = 0.1

# use the solvers default cubic spline kernel
# s = StressSolver(dim=2, integrator_type=solver.RK2Integrator)
s = StressSolver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator,
                 xsph=0.5, marts_eps=0.3, marts_n=4, CFL=cfl)

# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100

app.setup(s, create_particles=create_particles)
particles = s.particles
pa = particles.arrays[0]

app.run()
| Python |
""" An example solving stress test case """
import numpy
import sys
import os
import pysph.base.api as base
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver, get_particle_array
from pysph.sph.funcs import stress_funcs, arithmetic_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
app.opt_parse.add_option('--hfac', action='store', dest='hfac', default=None,
type='float',
help='the smoothing length as a factor of particle spacing')
app.opt_parse.add_option('--N', action='store', dest='N', default=None, type='float',
help='number of partitions (num particles=N+1)')
class PrintPos(object):
    ''' print properties of a particle in a column format (gnuplot/np.loadtxt) '''

    def __init__(self, particle_id, props=['x'], filename='stress.dat'):
        # Create the target directory only when the filename actually has
        # one.  The old code called os.makedirs(os.path.dirname(filename))
        # unconditionally, and os.makedirs('') raises for bare filenames
        # such as the default 'stress.dat'.
        dirname = os.path.dirname(filename)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        self.file = open(filename, 'w')
        # header row: iteration counter, time, then the tracked properties
        self.file.write('i\tt\t'+'\t'.join(props)+'\n')
        self.res = []
        self.props = props
        self.particle_id = particle_id

    def function(self, solver):
        """Append one row (count, t, props...) and write it out immediately."""
        l = [solver.count, solver.t]
        for prop in self.props:
            l.append(getattr(solver.particles.arrays[0], prop)[self.particle_id])
        self.res.append(l)
        s = '\n'.join('\t'.join(map(str,line)) for line in self.res)
        self.file.write(s)
        self.file.write('\n')
        self.res = []
def create_particles():
    """Create a 1D rod of particles on [0, 1]; the particle at x = 0 is
    flagged in ``bdry`` for clamping."""
    N = app.options.N or 20
    N += 1      # N partitions -> N+1 particles
    hfac = app.options.hfac or 1.2
    rho0 = 1.0
    #x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
    x = numpy.mgrid[0:1:1j*N]   # complex step: N equally spaced points incl. ends
    dx = 1.0/(N-1)
    x = x.ravel()
    #y = y.ravel()
    bdry = (x<=0)
    print bdry, numpy.flatnonzero(bdry)
    m = rho0*numpy.ones_like(x)*dx
    h = numpy.ones_like(x)*hfac*dx
    rho = rho0*numpy.ones_like(x)
    # NOTE(review): y, z and p all alias the same zero array here; assumes
    # get_particle_array copies its inputs -- confirm
    y = z = numpy.zeros_like(x)
    p = z
    #cs = numpy.ones_like(x) * 10000.0
    u = -x      # initial velocity directed towards x = 0
    u *= 0.1
    pa = get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=z, z=z,w=z,
                            name='solid', type=1,
                            bdry=bdry,)
    # material constants and derived moduli / sound speed
    pa.constants['E'] = 1e9
    pa.constants['nu'] = 0.3
    pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
    pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
    pa.constants['rho0'] = rho0
    pa.constants['dr0'] = dx
    pa.constants['c_s'] = numpy.sqrt(pa.constants['K']/pa.constants['rho0'])
    pa.cs = numpy.ones_like(x) * pa.constants['c_s']
    pa.set(idx=numpy.arange(len(pa.x)))
    print 'G:', pa.G
    print 'K', pa.K
    print 'c_s', pa.c_s
    print 'Number of particles: ', len(pa.x)
    return pa
class FixedBoundary(SPHFunction):
    """Pre-step callback pinning selected properties of chosen particles
    to fixed scalar values."""
    def __init__(self, source, dest, particle_indices, props=['x','y','z'],
                 values=[0,0,0], setup_arrays=True):
        self.indices = particle_indices
        self.props = props
        self.values = values
        SPHFunction.__init__(self, source, dest, setup_arrays)

    def set_src_dst_reads(self):
        # both read sets are just the pinned properties
        self.dst_reads = self.props
        self.src_reads = self.dst_reads

    def eval(self, solver):
        # overwrite each pinned property at the fixed indices
        for prop, value in zip(self.props, self.values):
            self.dest.get(prop)[self.indices] = value
# use the solvers default cubic spline kernel
s = StressSolver(dim=1, integrator_type=solver.PredictorCorrectorIntegrator, xsph=0.5, marts_eps=0.3, marts_n=4, CFL=None)

# can be overriden by commandline arguments
s.set_time_step(1e-7)
s.set_final_time(1e-3)

app.setup(s, create_particles=create_particles)
particles = s.particles
pa = particles.arrays[0]

# clamp the x = 0 particle: position and velocity pinned to zero each step
s.pre_step_functions.append(FixedBoundary(pa, pa, props=['u','x'], values=[0,0],
                            particle_indices=numpy.flatnonzero(pa.bdry)))

# record the trajectory of every particle to its own output file
for i in range(len(particles.arrays[0].x)):
    app.command_manager.add_function(PrintPos(i, ['x','y','u','p','rho','sigma00','ubar'],
                                     s.output_directory+'/stress%s.dat'%i).function,
                                     interval=1)

s.set_kernel_correction(-1)
s.pfreq = 10

app.run()

# everything below only executes if the sys.exit() is removed: quick-look
# plots and dumps of the final particle state
sys.exit(0)

from pylab import *
pa = particles.arrays[0]
plot(pa.x, pa.y, '.', label='y')
legend(loc='best')
figure()
plot(pa.x, pa.u, '.', label='u')
legend(loc='best')
figure()
plot(pa.x, pa.ubar, '.', label='ubar')
legend(loc='best')
figure()
plot(pa.x, pa.rho, '.', label='rho')
legend(loc='best')
figure()
plot(pa.x, pa.p, '.', label='p')
legend(loc='best')
figure()
plot(pa.x, pa.sigma00, '.', label='sigma00')
legend(loc='best')
print pa.x
print pa.y
print pa.z
print pa.u
print pa.v
print pa.w
show()
| Python |
""" Example file showing the use of solver controller and various interfaces
Usage:
Run this file after running the `controller_elliptical_drop.py` example file
A matplotlib plot window will open showing the current position of all
the particles and colored according to their velocities. The plot is updated
every second. This is based on the multiprocessing interface
A browser window is also opened which displays the various solver properties
and also allows you to change them. It is based on the xml-rpc interface
"""
import matplotlib
matplotlib.use('GTKAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import gobject # for the gobject timer
import time
import numpy
import webbrowser
import xmlrpclib
from pysph.solver.solver_interfaces import MultiprocessingClient
def test_interface_nonblocking(controller):
    """Exercise the controller in non-blocking mode: calls return task ids
    immediately and results are fetched later with get_result()."""
    print 't1', controller.get('dt')
    print 't2', controller.get_dt()
    task_id = controller.pause_on_next()
    print task_id
    time.sleep(1)
    print 'count', controller.get_count()
    time.sleep(1)
    # main thread is stopped; count should still be same
    print 'count2', controller.get_count()
    controller.cont()
    # main thread now still running; count should have increased
    time.sleep(1)
    print 'count3', controller.get_count()
    # in non-blocking mode the getter returns a task id, not the result
    task_id = controller.get_particle_array_names()
    pa_names = controller.get_result(task_id) # blocking call
    print 'pa_names', task_id, pa_names
    print controller.get_status()
def test_interface_blocking(controller):
    """Exercise the controller in blocking mode: every call returns its
    result directly."""
    print 't1', controller.get('dt')
    print 't2', controller.get_dt()
    task_id = controller.pause_on_next()
    print task_id
    time.sleep(1)
    print 'count', controller.get_count()
    time.sleep(1)
    # main thread is stopped; count should still be same
    print 'count2', controller.get_count()
    controller.cont()
    # main thread now still running; count should have increased
    time.sleep(1)
    print 'count3', controller.get_count()
    pa_names = controller.get_particle_array_names() # blocking call
    print 'pa_names', task_id, pa_names
    print controller.get_status()
def test_XMLRPC_interface(address='http://localhost:8900/'):
    """Connect to the solver's XML-RPC interface and run the blocking and
    non-blocking checks against it.  Returns the server proxy."""
    client = xmlrpclib.ServerProxy(address, allow_none=True)
    print client.system.listMethods()
    # client has all methods of `control` instance
    print client.get_t()
    print 'xmlrpcclient:count', client.get('count')
    test_interface_blocking(client)
    client.set_blocking(False)
    test_interface_nonblocking(client)
    client.set_blocking(True)
    return client
def test_web_interface(address='http://127.0.0.1:8900/controller_elliptical_drop_client.html'):
    """Open the solver's HTML control page in the default web browser."""
    webbrowser.open(address)
def test_multiprocessing_interface(address=('localhost',8800), authkey='pysph'):
    """Connect over the multiprocessing interface and run the blocking and
    non-blocking checks.  Returns the controller proxy."""
    client = MultiprocessingClient(address, authkey)
    controller = client.controller
    pa_names = controller.get_particle_array_names() # blocking call
    print controller.get_named_particle_array(pa_names[0]) # blocking call
    test_interface_blocking(controller)
    controller.set_blocking(False)
    test_interface_nonblocking(controller)
    controller.set_blocking(True)
    return controller
def test_plot(controller):
    """Scatter-plot the particle positions coloured by speed, refreshing
    from the running solver once a second via a gobject timer."""
    controller.set_blocking(True)
    pa_name = controller.get_particle_array_names()[0]
    pa = controller.get_named_particle_array(pa_name)
    #plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    line = ax.scatter(pa.x, pa.y, c=numpy.hypot(pa.u,pa.v))
    global t
    t = time.time()
    def update():
        # timer callback: fetch fresh particle data and redraw the scatter
        global t
        t2 = time.time()
        dt = t2 - t
        t = t2
        print 'count:', controller.get_count(), '\ttimer time:', dt,
        pa = controller.get_named_particle_array(pa_name)
        line.set_offsets(zip(pa.x, pa.y))
        line.set_array(numpy.hypot(pa.u,pa.v))
        fig.canvas.draw()
        print '\tresult & draw time:', time.time()-t
        return True   # keep the gobject timer alive
    update()
    # due to some gil issues in matplotlib, updates work only when
    # mouse is being hovered over the plot area (or a key being pressed)
    # when using python threading.Timer. Hence gobject.timeout_add
    # is being used instead
    gobject.timeout_add_seconds(1, update)
    plt.show()
def test_main():
    """Run all interface demos against an already-running solver."""
    test_XMLRPC_interface()
    controller = test_multiprocessing_interface()
    test_web_interface()
    test_plot(controller)

if __name__ == '__main__':
    test_main()
| Python |
""" An example solving the Elliptical drop test case with various interfaces """
import pysph.base.api as base
import pysph.solver.api as solver
app = solver.Application()
app.process_command_line(['-q', '--interactive',
'--xml-rpc=0.0.0.0:8900', '--multiproc=pysph@0.0.0.0:8800'])
s = solver.FluidSolver(dim=2, integrator_type=solver.EulerIntegrator)
app.set_solver(s, create_particles=solver.fluid_solver.get_circular_patch,
variable_h=False, name='fluid', type=0)
s.set_time_step(1e-5)
s.set_final_time(1e-1)
s.pfreq = 1000
if __name__ == '__main__':
app.run()
| Python |
""" Simple motion. """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
from random import randint
from numpy import random
nx = 1 << 5
dx = 0.5/nx
def create_particles_3d(**kwargs):
    """Create a cube of fluid particles on [0.25, 0.75]^3, initially at
    rest."""
    x, y, z = numpy.mgrid[0.25:0.75+1e-10:dx,
                          0.25:0.75+1e-10:dx,
                          0.25:0.75+1e-10:dx]
    x = x.ravel()
    y = y.ravel()
    z = z.ravel()
    np = len(x)
    # random velocities are drawn and then zeroed (* 0) -- effectively rest
    u = random.random(np) * 0
    v = random.random(np) * 0
    w = random.random(np) * 0
    m = numpy.ones_like(x) * dx**3
    # smoothing length: twice the cube root of the volume per particle
    vol_per_particle = numpy.power(0.5**3/np ,1.0/3.0)
    radius = 2 * vol_per_particle
    print "Using smoothing length: ", radius
    h = numpy.ones_like(x) * radius
    fluid = base.get_particle_array(name="fluid", type=base.Fluid,
                                    x=x, y=y, z=z,
                                    u=u, v=v, w=w,
                                    m=m,h=h)
    print "Number of particles: ", fluid.get_number_of_particles()
    return [fluid,]
def create_particles_2d(**kwargs):
    """Create a square patch of fluid particles on [0.25, 0.75]^2,
    initially at rest."""
    x, y = numpy.mgrid[0.25:0.75+1e-10:dx, 0.25:0.75+1e-10:dx]
    x = x.ravel()
    y = y.ravel()
    np = len(x)
    u = numpy.zeros_like(x)
    v = numpy.zeros_like(x)
    m = numpy.ones_like(x) * dx**2
    # smoothing length: twice the square root of the area per particle
    vol_per_particle = numpy.power(0.5**2/np ,1.0/2.0)
    radius = 2 * vol_per_particle
    print "Using smoothing length: ", radius
    h = numpy.ones_like(x) * radius
    fluid = base.get_particle_array(name="fluid", type=base.Fluid,
                                    x=x, y=y,
                                    u=u, v=v,
                                    m=m,
                                    h=h)
    print "Number of particles: ", fluid.get_number_of_particles()
    return [fluid,]
# define an integrator
class CrazyIntegrator(solver.EulerIntegrator):
    """Euler integrator that wraps the particle positions periodically
    into the unit box [0, 1]."""

    def step(self, dt):
        """Advance every stepped property by one Euler step of size dt."""
        stage = self.cstep
        for array in self.arrays:
            array.get_number_of_particles()   # kept for parity with the original
            # property mapping for this array at the current stage:
            # prop -> (initial property name, rate property name)
            prop_map = self.step_props[array.name][stage]
            for prop in prop_map:
                initial_name = prop_map[prop][0]
                rate_name = prop_map[prop][1]
                new_vals = array.get(initial_name) + array.get(rate_name) * dt
                if prop in ('x', 'y', 'z'):
                    # simple periodic wrap-around for positions
                    new_vals[new_vals < 0] += 1
                    new_vals[new_vals > 1] -= 1
                array.set(**{prop: new_vals})
        # move to the next integration stage
        self.cstep += 1
app = solver.Application()
s = solver.Solver(dim=2, integrator_type=CrazyIntegrator)

# Update the density of the particles
s.add_operation(solver.SPHOperation(
    sph.SPHRho.withargs(), on_types=[base.Fluid], from_types=[base.Fluid],
    updates=["rho"],
    id="sd")
)

# Compute some interaction between particles
s.add_operation(solver.SPHIntegration(
    sph.ArtificialPotentialForce.withargs(factorp=1.0, factorm=1.0),
    on_types=[base.Fluid], from_types=[base.Fluid, base.Solid],
    updates=["u","v", "w"],
    id="potential")
)

# step the particles
s.add_operation(solver.SPHIntegration(
    sph.PositionStepping.withargs(),
    on_types=[base.Fluid],
    updates=["x","y","z"],
    id="step")
)

s.set_time_step(1e-2)
s.set_final_time(5)

app.setup(
    solver=s,
    variable_h=False,
    create_particles=create_particles_2d)

cm = s.particles.cell_manager
print "Number of cells, cell size = %d, %g"%(len(cm.cells_dict), cm.cell_size)

# add a post step function to save the neighbor information every 10
# iterations
#s.post_step_functions.append( solver.SaveCellManagerData(
#    s.pid, path=s.output_directory, count=50) )

app.run()
| Python |
""" A script to demonstrate the simplest of calculations in parallel
Setup:
------
Two particle arrays are created on two separate processors with the
following properties:
processor 0:
x ~ [0,1], dx = 0.1, h = 0.2, m = 0.1, fval = x*x
processor 1:
x ~ [1.1, 2], dx = 0.1, h = 0.2, m = 0.1, fval = x*x
"""
# mpi imports
from mpi4py import MPI
#numpy and logging
import numpy, logging
#local pysph imports
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.base.carray import LongArray
from pysph.base.api import Particles, get_particle_array
from pysph.base.kernels import CubicSplineKernel
# MPI setup: this demo is written for exactly two ranks (see the module
# docstring); refuse to start on more.
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()

if num_procs > 2:
    # Fixed: the message used to claim 'less than 5 processors', which
    # contradicted the actual num_procs > 2 check above.  The call form
    # of raise also works under both Python 2 and 3.
    raise SystemError('Start this script on at most 2 processors')

# per-rank log file so output from the two processes does not interleave
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
                    filemode='w')
logger.addHandler(logging.StreamHandler())
#create the particles on processor 0
if rank == 0:
    x = numpy.linspace(0,1,11)
    h = numpy.ones_like(x)*0.2
    m = numpy.ones_like(x)*0.1
    rho = numpy.ones_like(x)
    fval = x*x

#create the particles on processor 1
if rank == 1:
    x = numpy.linspace(1.1,2,10)
    h = numpy.ones_like(x)*0.2
    m = numpy.ones_like(x)*0.1
    rho = numpy.ones_like(x)
    fval = x*x

#create the particles in parallel without load balancing
kernel = CubicSplineKernel(dim=1)
pa = get_particle_array(x=x, h=h, m=m, fval=fval, rho=rho)
particles = Particles([pa], in_parallel=True,
                      load_balancing=False)

#make sure the particles need updating
particles.update()

#choose the function and the sph calc
func = sph.SPHRho(pa, pa)
calc = sph.SPHCalc(particles=particles, kernel=kernel, func=func,
                   updates=['rho'], integrates=False)

tmpx = pa.get('tmpx', only_real_particles=False)
logger.debug('tempx for all particles %s'%(tmpx))

#perform the summation density operation
calc.sph()

local = pa.get('local', only_real_particles=False)
logger.debug('Local indices for process %d are %s'%(rank, local))

#check for the density values on each processor
# NOTE(review): the summation-density result is read back from the 'tmpx'
# scratch property, not from 'rho'
rho = pa.get('tmpx', only_real_particles=True)
logger.debug('Density for local particles on processor %d is %s '%(rank, rho))
| Python |
""" The moving square test case is part of the SPHERIC benchmark
tests. Refer to the document for the test details.
Numerical Parameters:
---------------------
dx = dy = 0.005
h = 0.0065 => h/dx = 1.3
Length of Box = 10
Height of Box = 5
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
Vmax = 1.0
co = 15 (15 * Vmax)
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
# particle type tags used when building the arrays below
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
DummyFluid = base.ParticleType.DummyFluid
# NOTE(review): the module docstring quotes dx = 0.005 and h = 0.0065;
# the code below uses a 10x coarser resolution -- confirm which is intended.
dx = 0.05
h = 1.3*dx           # smoothing length, h/dx = 1.3
ro = 1000.0          # reference density
co = 15.0            # sound speed (15 * Vmax per the docstring)
gamma = 7.0          # Tait EOS exponent
alpha = 0.5          # artificial viscosity coefficient
eps = 0.5            # XSPH correction factor
box_length = 10.0
box_height = 5.0
square_side = 1.0
B = co*co*ro/gamma   # Tait EOS pressure constant
m = ro*dx*dx         # particle mass for one 2D grid cell
pi = numpy.pi
pi2 = pi/2.0
class MoveSquare:
    """Post-step callback that drags the "square" particle array along a
    prescribed displacement history read from a whitespace-delimited file
    (column 0: time, column 3: displacement)."""

    def __init__(self, fname = "Motion_Body.dat"):
        # reference x used to convert absolute positions to displacements
        self.original_position = 1.5
        history = numpy.loadtxt(fname)
        self.time = history[:,0]
        self.disp = history[:,3]

    def eval(self, solver):
        """Shift the square to the position interpolated at solver.time."""
        square = solver.particles.get_named_particle_array("square")
        target = numpy.interp(solver.time, self.time, self.disp)
        offset = target - self.original_position
        square.set(x=square.get('x') + offset)
def get_wall():
    """Mesh the closed rectangular container and return its particle
    array, with mass and smoothing length overwritten by the module
    level values."""
    segments = [
        base.Line(base.Point(), box_height, pi2),                          # left
        base.Line(base.Point(0, box_height), box_length, 0),               # top
        base.Line(base.Point(box_length, box_height), box_height, pi+pi2), # right
        base.Line(base.Point(box_length), box_length, pi),                 # bottom
    ]
    geometry = base.Geometry('box', segments, is_closed=True)
    geometry.mesh_geometry(dx)
    wall = geometry.get_particle_array(re_orient=False)
    wall.m[:] = m
    wall.h[:] = h
    return wall
def get_square():
    """Mesh the closed unit-square obstacle with corners (1,2)-(2,3) and
    return its particle array with mass/smoothing length filled in."""
    edges = [
        base.Line(base.Point(1,2), square_side, pi2),     # left
        base.Line(base.Point(1,3), square_side, 0),       # top
        base.Line(base.Point(2,3), square_side, pi+pi2),  # right
        base.Line(base.Point(2,2), square_side, pi),      # bottom
    ]
    geometry = base.Geometry('square', edges,
                             is_closed=True)
    geometry.mesh_geometry(dx)
    square = geometry.get_particle_array(name="square", re_orient=True)
    square.m[:] = m
    square.h[:] = h
    return square
def get_fluid():
    """ Get the fluid particle array.

    Fills the box interior with fluid on a regular grid, then removes
    the particles occupying the square obstacle's footprint
    ([1,2] x [2,3], with a dx/2 margin).
    """
    x, y = numpy.mgrid[dx: box_length - 1e-10: dx,
                       dx: box_height - 1e-10: dx]
    xf, yf = x.ravel(), y.ravel()

    mf = numpy.ones_like(xf) * m
    hf = numpy.ones_like(xf) * h
    rhof = numpy.ones_like(xf) * ro
    cf = numpy.ones_like(xf) * co
    pf = numpy.zeros_like(xf)

    fluid = base.get_particle_array(name="fluid", type=Fluid,
                                    x=xf, y=yf, h=hf, rho=rhof, c=cf, p=pf)

    # remove indices within the square: vectorized mask replacing the
    # former per-particle Python loop (same indices, same order)
    x, y = fluid.get('x','y')
    inside = ((1.0 - dx/2 <= x) & (x <= 2.0 + dx/2) &
              (2.0 - dx/2 <= y) & (y <= 3.0 + dx/2))
    indices = numpy.where(inside)[0]

    to_remove = base.LongArray(len(indices))
    to_remove.set_data(indices)
    fluid.remove_particles(to_remove)

    return fluid
def get_dummy_particles():
    """Create the layer of dummy (ghost) fluid surrounding the box.

    A grid extending 5*dx beyond the box on every side is generated and
    the interior of the box itself is carved out, leaving only the
    surrounding frame of dummy particles.
    """
    x, y = numpy.mgrid[-5*dx: box_length + 5*dx + 1e-10: dx,
                       -5*dx: box_height + 5*dx + 1e-10: dx]

    xd, yd = x.ravel(), y.ravel()

    md = numpy.ones_like(xd) * m
    hd = numpy.ones_like(xd) * h
    rhod = numpy.ones_like(xd) * ro
    cd = numpy.ones_like(xd) * co
    pd = numpy.zeros_like(xd)

    dummy_fluid = base.get_particle_array(name="dummy_fluid",
                                          type=Fluid, x=xd, y=yd,
                                          h=hd, rho=rhod, c=cd, p=pd)

    # remove indices within the box interior: vectorized mask replacing
    # the former per-particle Python loop (same indices, same order)
    x, y = dummy_fluid.get('x','y')
    inside = ((-dx/2 <= x) & (x <= box_length + dx/2) &
              (-dx/2 <= y) & (y <= box_height + dx/2))
    indices = numpy.where(inside)[0]

    to_remove = base.LongArray(len(indices))
    to_remove.set_data(indices)
    dummy_fluid.remove_particles(to_remove)

    return dummy_fluid
def get_particles():
    """Assemble all particle arrays: container wall, moving square,
    fluid and the surrounding dummy (ghost) fluid, in that order."""
    return [get_wall(), get_square(), get_fluid(), get_dummy_particles()]
# driver: build the application, particle arrays and the solver pipeline
app = solver.Application()
app.process_command_line()
particles = app.create_particles(False, get_particles)
# 2D predictor-corrector time integration
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# Equation of state (Tait): computes pressure and sound speed
s.add_operation(solver.SPHOperation(
    sph.TaitEquation(co=co, ro=ro),
    on_types=[Fluid],
    updates=['p', 'cs'],
    id='eos')
)
# Continuity equation: density rate from fluid and dummy neighbours
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate(),
    on_types=[Fluid], from_types=[Fluid, DummyFluid],
    updates=['rho'], id='density')
)
# momentum equation (pressure gradient + artificial viscosity)
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquation(alpha=alpha, beta=0.0),
    on_types=[Fluid], from_types=[Fluid, DummyFluid],
    updates=['u','v'], id='mom')
)
# monaghan boundary force from the solid wall/square particles
s.add_operation(solver.SPHIntegration(
    sph.MonaghanBoundaryForce(delp=dx),
    on_types=[Fluid], from_types=[Solid], updates=['u','v'],
    id='bforce')
)
# Position stepping and XSPH correction
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
# add post step and pre step functions for movement
s.set_final_time(3.0)
s.set_time_step(1e-5)
# MoveSquare drags the obstacle along its prescribed motion each step
s.post_step_functions.append(MoveSquare())
app.set_solver(s)
app.run()
| Python |
""" 2D Dam Break Over a dry bed. The case is described in "State of
the art classical SPH for free surface flows", Benedict D Rogers,
Robert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic
Research, Vol 48, Extra Issue (2010), pp 6-27
Setup:
------
x x !
x x !
x x !
x x !
x o o o x !
x o o x !3m
x o o o x !
x o o x !
x o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
Fluid particles are placed on a staggered grid. The nodes of the grid
are located at R = l*dx i + m * dy j with a two point bias (0,0) and
(dx/2, dy/2) refered to the corner defined by R. l and m are integers
and i and j are the unit vectors alon `X` and `Y` respectively.
For the Monaghan Type Repulsive boundary condition, a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
For the Dynamic Boundary Conditions, a staggered grid arrangement is
used for the boundary particles.
Numerical Parameters:
---------------------
dx = dy = 0.012m
h = 0.0156 => h/dx = 1.3
Height of Water column = 2m
Length of Water column = 1m
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import warnings
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
from pysph.tools import geometry_utils as geom
# particle type tags
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 4.0
#h = 0.0156
h = 0.0390           # smoothing length (coarser than the docstring's 0.0156)
#h = 0.01
dx = dy = 0.03       # particle spacing, h/dx = 1.3
ro = 1000.0          # reference density
co = 10.0 * numpy.sqrt(2*9.81*fluid_column_height)   # ~65 m/s, 10*sqrt(2gH)
gamma = 7.0          # Tait EOS exponent
# NOTE(review): the module docstring says alpha = 0.5 but the code uses 0.3
alpha = 0.3
eps = 0.5            # XSPH correction factor
B = co*co*ro/gamma   # Tait EOS pressure constant
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
xb1, yb1 = geom.create_2D_tank(x1=0, y1=0,
x2=container_width, y2=container_height,
dx=dx)
xb2, yb2 = geom.create_2D_tank(x1=-dx/2, y1=-dx/2,
x2=container_width, y2=container_height,
dx=dx)
xb = numpy.concatenate((xb1, xb2))
yb = numpy.concatenate((yb1, yb2))
hb = numpy.ones_like(xb)*h
mb = numpy.ones_like(xb)*dx*dy*ro*0.5
rhob = numpy.ones_like(xb) * ro
cb = numpy.ones_like(xb)*co
boundary = base.get_particle_array(cl_precision="single",
name="boundary", type=Solid,
x=xb, y=yb, h=hb, rho=rhob, cs=cb,
m=mb)
print 'Number of Boundary particles: ', len(xb)
return boundary
def get_fluid_particles():
xf1, yf1 = geom.create_2D_filled_region(x1=dx, y1=dx,
x2=fluid_column_width,
y2=fluid_column_height,
dx=dx)
xf2, yf2 = geom.create_2D_filled_region(x1=dx/2, y1=dx/2,
x2=fluid_column_width,
y2=fluid_column_height,
dx=dx)
x = numpy.concatenate((xf1, xf2))
y = numpy.concatenate((yf1, yf2))
print 'Number of fluid particles: ', len(x)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * ro * 0.5
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(cl_precision="single",
name="fluid", type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof,
cs=csf)
return fluid
def get_particles(**args):
    """Return the fluid column followed by the tank boundary."""
    return [get_fluid_particles(), get_boundary_particles()]
app = solver.Application()
integrator_type = solver.PredictorCorrectorIntegrator
s = solver.Solver(dim=2, integrator_type=integrator_type)
# NOTE(review): `kernel` is created but never attached to the solver in
# this script -- confirm whether `s.default_kernel = kernel` was intended.
kernel = base.CubicSplineKernel(dim=2)
# define the artificial pressure term for the momentum equation
# NOTE(review): `deltap` is defined here but the momentum operation below
# passes deltap=None, so the artificial pressure term is disabled.
deltap = dx
n = 4
#Equation of state
s.add_operation(solver.SPHOperation(
    sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
    on_types=[Fluid, Solid],
    updates=['p', 'cs'],
    id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(hks=False),
    on_types=[Fluid, Solid], from_types=[Fluid, Solid],
    updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False,
                                  deltap=None, n=n),
    on_types=[Fluid], from_types=[Fluid, Solid],
    updates=['u','v'], id='mom')
)
#s.add_operation(solver.SPHIntegration(
#    sph.SPHPressureGradient.withargs(),
#    on_types=[Fluid], from_types=[Fluid,Solid],
#    updates=['u','v'], id='pgrad')
#    )
#s.add_operation(solver.SPHIntegration(
#    sph.MonaghanArtificialVsicosity.withargs(alpha=alpha, beta=0.0),
#    on_types=[Fluid], from_types=[Fluid,Solid],
#    updates=['u','v'], id='avisc')
#    )
#Gravity force
s.add_operation(solver.SPHIntegration(
    sph.GravityForce.withargs(gy=-9.81),
    on_types=[Fluid],
    updates=['u','v'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
dt = 1e-4
s.set_final_time(3.0)
s.set_time_step(dt)
app.setup(
    solver=s,
    variable_h=False, create_particles=get_particles, min_cell_size=4*h,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator,
    domain_manager_type=base.DomainManagerType.LinkedListManager,
    cl_locator_type=base.OpenCLNeighborLocatorType.LinkedListSPHNeighborLocator
)
# this tells the solver to compute the max time step dynamically
#s.time_step_function = solver.ViscousTimeStep(co=co,cfl=0.3,
#                                              particles=s.particles)
s.time_step_function = solver.ViscousAndForceBasedTimeStep(co=co, cfl=0.3,
                                                           particles=s.particles)
if app.options.with_cl:
    msg = """\n\n
    You have chosen to run the example with OpenCL support. The only
    integrator with OpenCL support is the forward Euler
    integrator. This integrator will be used instead of the default
    predictor corrector integrator for this example.\n\n
    """
    warnings.warn(msg)
    # NOTE(review): this reassignment has no effect -- the solver `s` was
    # already constructed with the predictor-corrector integrator above.
    integrator_type = solver.EulerIntegrator
app.run()
| Python |
""" 2D Dam Break Over a dry bed. The case is described in "State of
the art classical SPH for free surface flows", Benedict D Rogers,
Robert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic
Research, Vol 48, Extra Issue (2010), pp 6-27
Setup:
------
x x !
x x !
x x !
x x !
x o o o x !
x o o x !3m
x o o o x !
x o o x !
x o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
Fluid particles are placed on a staggered grid. The nodes of the grid
are located at R = l*dx i + m * dy j with a two point bias (0,0) and
(dx/2, dy/2) refered to the corner defined by R. l and m are integers
and i and j are the unit vectors alon `X` and `Y` respectively.
For the Monaghan Type Repulsive boundary condition, a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
For the Dynamic Boundary Conditions, a staggered grid arrangement is
used for the boundary particles.
Numerical Parameters:
---------------------
dx = dy = 0.012m
h = 0.0156 => h/dx = 1.3
Height of Water column = 2m
Length of Water column = 1m
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import warnings
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
# particle type tags
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
#h = 0.0156
h = 0.0390           # smoothing length (coarser than the docstring's 0.0156)
#h = 0.01
dx = dy = h/1.3      # particle spacing so that h/dx = 1.3
ro = 1000.0          # reference density
co = 65.0            # artificial sound speed, ~10*sqrt(2*9.81*2)
gamma = 7.0          # Tait EOS exponent
alpha = 0.5          # artificial viscosity coefficient
eps = 0.5            # XSPH correction factor
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
# NOTE(review): the docstring sketch shows a 4m tank; code uses 6.0 --
# presumably widened for the two mirrored fluid columns. Confirm.
container_width = 6.0
B = co*co*ro/gamma   # Tait EOS pressure constant
def get_1D_grid(start, end, spacing):
    """Return uniformly spaced points covering [start, end] inclusive.

    A tiny tolerance (1e-10) is added to the upper bound so that `end`
    itself is included despite floating-point round-off in arange.
    """
    upper = end + 1e-10
    return numpy.arange(start, upper, spacing)
def get_2D_grid(start_point, end_point, spacing):
    """Return flat x, y arrays for a uniform cartesian grid.

    `start_point` / `end_point` are the lower-left and upper-right
    corners (objects exposing .x and .y); the end point itself is
    excluded, following numpy.mgrid slice semantics.  Both returned
    arrays are 1D (raveled).
    """
    grid = numpy.mgrid[start_point.x:end_point.x:spacing,
                       start_point.y:end_point.y:spacing]
    return grid[0].ravel(), grid[1].ravel()
def get_2D_staggered_grid(bias_point_1, bias_point_2, end_point, spacing):
    """ Return a staggered cartesian grid in 2D

    Two uniform grids with different origins (the bias points) are
    generated and merged; points from the first grid come first in the
    returned arrays.

    Parameters:
    -----------
    bias_point_1 -- the first grid starting point
    bias_point_2 -- the second grid starting point
    end_point -- the maximum `x` and `y` for the grid
    spacing -- uniform spacing in `x` and `y`
    """
    x1, y1 = get_2D_grid(bias_point_1, end_point, spacing)
    x2, y2 = get_2D_grid(bias_point_2, end_point, spacing)
    # numpy.concatenate replaces the former manual zeros-and-slice fill;
    # ordering (grid 1 followed by grid 2) is unchanged
    x = numpy.concatenate((x1, x2))
    y = numpy.concatenate((y1, y2))
    return x, y
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
#left wall
ylw = get_1D_grid(0, container_height, dy)
xlw = numpy.zeros_like(ylw)
nb1 = len(ylw)
#bottom
xbs = get_1D_grid(dx, container_width+dx, dx)
ybs = numpy.zeros_like(xbs)
nb3 = len(xbs)
max_xb = numpy.max(xbs)
#staggered left wall
yslw = get_1D_grid(-dx/2, container_height, dx)
xslw = numpy.ones_like(yslw) * -dx/2
nb4 = len(yslw)
#staggered bottom
xsb = get_1D_grid(dx/2, container_width+dx+dx, dx)
ysb = numpy.ones_like(xsb) * -dy/2
nb6 = len(xsb)
max_xsb = numpy.max(xsb)
#right wall
yrw = numpy.arange(dx, container_height, dx)
xrw = numpy.ones_like(yrw) * max_xb
nb2 = len(yrw)
#staggered right wall
ysrw = numpy.arange(dy/2, container_height, dy)
xsrw = numpy.ones_like(ysrw) * max_xsb
nb5 = len(ysrw)
nb = nb1 + nb2 + nb3 + nb4 + nb5 + nb6
print "Number of Boundary Particles: ", nb
xb = numpy.zeros(nb, float)
yb = numpy.zeros(nb, float)
idx = 0
xb[:nb1] = xlw; yb[:nb1] = ylw
idx += nb1
xb[idx:idx+nb2] = xrw; yb[idx:idx+nb2] = yrw
idx += nb2
xb[idx:idx+nb3] = xbs; yb[idx:idx+nb3] = ybs
idx += nb3
xb[idx:idx+nb4] = xslw; yb[idx:idx+nb4] = yslw
idx += nb4
xb[idx:idx+nb5] = xsrw; yb[idx:idx+nb5] = ysrw
idx += nb5
xb[idx:] = xsb; yb[idx:] = ysb
hb = numpy.ones_like(xb)*h
mb = numpy.ones_like(xb)*dx*dy*ro
rhob = numpy.ones_like(xb) * ro
cb = numpy.ones_like(xb)*co
boundary = base.get_particle_array(name="boundary", type=Solid,
x=xb, y=yb, h=hb, rho=rhob, cs=cb,
m=mb)
width = max_xb
return boundary, width
def get_fluid_particles(name="fluid"):
    """Create one 1m x 2m water column on a staggered grid.

    `name` lets the caller create several identically shaped columns.
    """
    x, y = get_2D_staggered_grid(base.Point(dx, dx), base.Point(dx/2, dx/2),
                                 base.Point(1.0,2.0), dx)
    ones = numpy.ones_like(x)
    return base.get_particle_array(name=name, type=Fluid,
                                   x=x, y=y, h=ones*h, m=ones*dx*dy*ro,
                                   rho=ones*ro, cs=ones*co)
def get_particles(**args):
boundary, width = get_boundary_particles()
fluid1 = get_fluid_particles(name="fluid1")
fluid2 = get_fluid_particles(name="fluid2")
fluid2.x = width - fluid2.x
print 'Number of fluid particles: ', len(fluid1.x) + len(fluid2.x)
return [fluid1, fluid2, boundary]
app = solver.Application()
integrator_type = solver.RK2Integrator
# harmonic kernel of order 3, installed as the solver default below
kernel = base.HarmonicKernel(dim=2, n=3)
s = solver.Solver(dim=2, integrator_type=integrator_type)
s.default_kernel = kernel
#Equation of state
s.add_operation(solver.SPHOperation(
    sph.TaitEquation.withargs(co=co, ro=ro),
    on_types=[Fluid, Solid],
    updates=['p', 'cs'],
    id='eos')
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(),
    on_types=[Fluid, Solid], from_types=[Fluid, Solid],
    updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquation.withargs(alpha=alpha, beta=0.0),
    on_types=[Fluid], from_types=[Fluid, Solid],
    updates=['u','v'], id='mom')
)
#Gravity force
s.add_operation(solver.SPHIntegration(
    sph.GravityForce.withargs(gy=-9.81),
    on_types=[Fluid],
    updates=['u','v'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
s.set_final_time(10)
s.set_time_step(1e-4)
# NOTE(review): this script passes `domain_manager=` while the sibling
# example uses `domain_manager_type=` -- confirm the expected kwarg name.
app.setup(
    solver=s,
    variable_h=False, create_particles=get_particles,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator,
    domain_manager=base.DomainManagerType.DomainManager,
    cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
if app.options.with_cl:
    msg = """\n\n
    You have chosen to run the example with OpenCL support. The only
    integrator with OpenCL support is the forward Euler
    integrator. This integrator will be used instead of the default
    RK2 integrator for this example.\n\n
    """
    warnings.warn(msg)
    # NOTE(review): this reassignment has no effect -- the solver `s` was
    # already constructed with the RK2 integrator above.
    integrator_type = solver.EulerIntegrator
app.run()
| Python |
""" A tiny dam break problem
Setup:
------
x x !
x x !
x x !
x x !
x o o o o o x !
x o o o o o x !3m
x o o o o o x !
x o o o o o x !
x o o o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
The Monaghan Type Repulsive boundary condition, with a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
Numerical Parameters:
---------------------
h = 0.05
dx = dy = h/1.25 = 0.04
Height of Water column = 2m
Length of Water column = 1m
Number of fluid particles = 1250
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import sys
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
# particle type tags
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
h = 0.05             # smoothing length
dx = dy = h/1.25     # particle spacing (h/dx = 1.25, per the docstring)
ro = 1000.0          # reference density
co = 65.0            # artificial sound speed ~ 10*sqrt(2*9.81*2)
gamma = 7.0          # Tait EOS exponent
alpha = 0.5          # artificial viscosity coefficient
eps = 0.5            # XSPH correction factor
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 4.0
B = co*co*ro/gamma   # Tait EOS pressure constant
def get_boundary_particles():
    """Mesh the dam as an open box (left wall, bottom, right wall) of
    Monaghan-type repulsive boundary particles and return the array."""
    walls = [
        base.Line(base.Point(0,0), container_height, numpy.pi/2),
        base.Line(base.Point(container_width,0),
                  container_width, numpy.pi),
        base.Line(base.Point(container_width,container_height),
                  container_height, 1.5*numpy.pi),
    ]
    dam = base.Geometry('box', walls, is_closed=False)
    dam.mesh_geometry(dx)
    return dam.get_particle_array(re_orient=False,
                                  name="boundary")
def get_fluid_particles():
xarr = numpy.arange(dx, 1.0 + dx, dx)
yarr = numpy.arange(dx, 2.0 + dx, dx)
x,y = numpy.meshgrid( xarr, yarr )
x, y = x.ravel(), y.ravel()
print 'Number of fluid particles: ', len(x)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * ro
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(name="fluid", type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof, cs=csf)
return fluid
def get_particles(**args):
    """Return the fluid column followed by the open dam geometry."""
    return [get_fluid_particles(), get_boundary_particles()]
app = solver.Application()
# simple forward Euler integration in 2D
s = solver.Solver(dim=2, integrator_type=solver.EulerIntegrator)
#Equation of state
s.add_operation(solver.SPHOperation(
    sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
    on_types=[Fluid],
    updates=['p', 'cs'],
    id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(hks=False),
    on_types=[Fluid], from_types=[Fluid],
    updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False),
    on_types=[Fluid], from_types=[Fluid],
    updates=['u','v'], id='mom')
)
#Gravity force
s.add_operation(solver.SPHIntegration(
    sph.GravityForce.withargs(gy=-9.81),
    on_types=[Fluid],
    updates=['u','v'],id='gravity')
)
#the boundary force (repulsive force from the solid wall particles)
s.add_operation(solver.SPHIntegration(
    sph.MonaghanBoundaryForce.withargs(delp=dx),
    on_types=[Fluid], from_types=[Solid], updates=['u','v'],
    id='bforce')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
dt = 1e-4
s.set_final_time(3.0)
s.set_time_step(dt)
app.setup(
    solver=s,
    variable_h=False, create_particles=get_particles, min_cell_size=2*h,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator,
    domain_manager=base.DomainManagerType.DomainManager,
    cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
# MonaghanBoundaryForce has no OpenCL implementation, so refuse --with-cl
if app.options.with_cl:
    raise RuntimeError("OpenCL support not added for MonaghanBoundaryForce!")
s.set_print_freq(1000)
app.run()
| Python |
""" 2D Dam Break Over a dry bed. The case is described in "State of
the art classical SPH for free surface flows", Benedict D Rogers,
Robert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic
Research, Vol 48, Extra Issue (2010), pp 6-27
Setup:
------
x x !
x x !
x x !
x x !
x o o o x !
x o o x !3m
x o o o x !
x o o x !
x o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
Fluid particles are placed on a staggered grid. The nodes of the grid
are located at R = l*dx i + m * dy j with a two point bias (0,0) and
(dx/2, dy/2) refered to the corner defined by R. l and m are integers
and i and j are the unit vectors alon `X` and `Y` respectively.
For the Monaghan Type Repulsive boundary condition, a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
For the Dynamic Boundary Conditions, a staggered grid arrangement is
used for the boundary particles.
Numerical Parameters:
---------------------
dx = dy = 0.012m
h = 0.0156 => h/dx = 1.3
Height of Water column = 2m
Length of Water column = 1m
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import warnings
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
from pysph.tools import geometry_utils as geom
# particle type tags
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
#h = 0.0156
h = 0.039            # smoothing length
#h = 0.01
dx = dy = h/1.3      # particle spacing so that h/dx = 1.3
ro = 1000.0          # reference density
co = 65.0            # artificial sound speed ~ 10*sqrt(2*9.81*2)
gamma = 7.0          # Tait EOS exponent
alpha = 0.5          # artificial viscosity coefficient
eps = 0.5            # XSPH correction factor
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 4.0
B = co*co*ro/gamma   # Tait EOS pressure constant
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
xb1, yb1, zb1 = geom.create_3D_tank(0, 0, 0, container_width, container_height, container_width/2, dx)
xb2, yb2, zb2 = geom.create_3D_tank(-dx/2, -dx/2, -dx/2, container_width, container_height,
container_width/2, dx)
xb = numpy.concatenate((xb1, xb2))
yb = numpy.concatenate((yb1, yb2))
zb = numpy.concatenate((zb1, zb2))
hb = numpy.ones_like(xb)*h
mb = numpy.ones_like(xb)*dx*dy*dx*ro*0.5
rhob = numpy.ones_like(xb) * ro
cb = numpy.ones_like(xb)*co
boundary = base.get_particle_array(name="boundary", type=Solid,
x=xb, y=yb, z=zb, h=hb, rho=rhob, cs=cb,
m=mb)
print 'Number of Boundary particles: ', len(xb)
return boundary
def get_fluid_particles():
xf1, yf1, zf1 = geom.create_3D_filled_region(dx, dx, dx,fluid_column_width, fluid_column_height,
fluid_column_width/2, dx)
xf2, yf2, zf2 = geom.create_3D_filled_region(dx/2, dx/2, dx/2, fluid_column_width, fluid_column_height,
fluid_column_width/2, dx)
x = numpy.concatenate((xf1, xf2))
y = numpy.concatenate((yf1, yf2))
z = numpy.concatenate((zf1, zf2))
print 'Number of fluid particles: ', len(x)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * dx * ro * 0.5
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(name="fluid", type=Fluid,
x=x, y=y, z=z, h=hf, m=mf, rho=rhof, cs=csf)
return fluid
def get_particles(**args):
    """Return the fluid column followed by the tank boundary."""
    return [get_fluid_particles(), get_boundary_particles()]
app = solver.Application()
integrator_type = solver.PredictorCorrectorIntegrator
# NOTE(review): the particle arrays created above carry z coordinates
# (3D tank/column) but the solver and kernel are built with dim=2 --
# confirm whether dim=3 was intended.
s = solver.Solver(dim=2, integrator_type=integrator_type)
# NOTE(review): `kernel` is never attached to the solver in this script.
kernel = base.CubicSplineKernel(dim=2)
# define the artificial pressure term for the momentum equation
# NOTE(review): `deltap`/`n` are only used by the MomentumEquation block
# that is commented out below.
deltap = -1/1.3
n = 4
#Equation of state
s.add_operation(solver.SPHOperation(
    sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
    on_types=[Fluid, Solid],
    updates=['p', 'cs'],
    id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(hks=False),
    on_types=[Fluid, Solid], from_types=[Fluid, Solid],
    updates=['rho'], id='density')
)
#momentum equation
# s.add_operation(solver.SPHIntegration(
#     sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False,
#                                   deltap=deltap, n=n),
#     on_types=[Fluid], from_types=[Fluid, Solid],
#     updates=['u','v'], id='mom')
# )
# NOTE(review): the updates lists below mix velocities 'u','v' with 'z';
# 'z' is a position coordinate -- 'w' (z-velocity) was possibly intended.
s.add_operation(solver.SPHIntegration(
    sph.SPHPressureGradient.withargs(),
    on_types=[Fluid], from_types=[Fluid,],
    updates=['u','v','z'], id='pgrad')
)
s.add_operation(solver.SPHIntegration(
    sph.MonaghanArtificialViscosity.withargs(alpha=alpha, beta=0.0),
    on_types=[Fluid], from_types=[Fluid,Solid],
    updates=['u','v','z'], id='avisc')
)
#Gravity force
s.add_operation(solver.SPHIntegration(
    sph.GravityForce.withargs(gy=-9.81),
    on_types=[Fluid],
    updates=['u','v','z'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
dt = 1.25e-4
s.set_final_time(3.0)
s.set_time_step(dt)
app.setup(
    solver=s,
    variable_h=False, create_particles=get_particles, min_cell_size=4*h,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator,
    domain_manager=base.DomainManagerType.DomainManager,
    cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
# this tells the solver to compute the max time step dynamically
s.time_step_function = solver.ViscousTimeStep(co=co,cfl=0.3,
                                              particles=s.particles)
if app.options.with_cl:
    msg = """\n\n
    You have chosen to run the example with OpenCL support. The only
    integrator with OpenCL support is the forward Euler
    integrator. This integrator will be used instead of the default
    predictor corrector integrator for this example.\n\n
    """
    warnings.warn(msg)
    # NOTE(review): this reassignment has no effect -- the solver `s` was
    # already constructed with the predictor-corrector integrator above.
    integrator_type = solver.EulerIntegrator
app.run()
| Python |
""" Dam break simulation over a wet bed.
This is part of the SPHERIC validation test cases (case 5)
(http://wiki.manchester.ac.uk/spheric/index.php/SPHERIC_Home_Page)
The main reference for this test case is 'State-of-the-art classical SPH for free-surface flows' by Moncho Gomez-Gesteira and Benedict D. Rogers and Robert
A. Dalrymple and Alex J. Crespo, Journal of Hydraulic Research Extra
Issue (2010) pp 6-27
"""
import numpy
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.tools.geometry_utils as geom
# Geometric parameters
dx = 0.005           # particle spacing
h0 = 0.006           # reference smoothing length (h0/dx = 1.2)
d = 0.0180           # depth of the wet bed downstream of the gate
H = 0.15             # height of the dammed water column
# NOTE(review): stale comment -- 0.38 + 3.0 = 3.38, not 9.55; presumably
# the tank was shortened from the full 9.55m SPHERIC geometry. Confirm.
tank_length = 0.38 + 3.0 #9.55
tank_height = 0.2
# Numerical parameters
vmax = numpy.sqrt(2*9.81*H)   # free-fall velocity scale sqrt(2 g H)
co = 10.0 * vmax              # artificial sound speed
ro = 1000.0                   # reference density
B = co*co*ro/7.0              # Tait EOS constant (gamma = 7)
alpha = 0.08                  # artificial viscosity coefficient
beta = 0.0
eps = 0.5                     # XSPH correction factor
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
def get_boundary_particles():
    """ Get the particles corresponding to the dam and fluids

    Returns [tank, gate]: the tank meshed as two staggered layers, and
    the removable gate as three vertical columns of particles near
    x = 0.38 moving with a prescribed velocity.
    """
    # get the tank: primary layer plus a layer staggered by dx/2
    xt1, yt1 = geom.create_2D_tank(x1=0, y1=0,
                                   x2=tank_length, y2=tank_height,
                                   dx=dx)
    xt2, yt2 = geom.create_2D_tank(x1=-dx/2, y1=-dx/2,
                                   x2=tank_length + dx/2, y2=tank_height+dx/2,
                                   dx=dx)
    x = numpy.concatenate( (xt1, xt2) )
    y = numpy.concatenate( (yt1, yt2) )
    h = numpy.ones_like(x) * h0
    # half mass per particle: the staggered layers double the point count
    m = numpy.ones_like(x) * ro*dx*dx*0.5
    rho = numpy.ones_like(x) * ro
    cs = numpy.ones_like(x) * co
    tank = base.get_particle_array(cl_precision="single", name="tank",
                                   type=Solid, x=x,y=y,m=m,rho=rho,h=h,cs=cs)
    np = tank.get_number_of_particles()
    # create the gate: three columns at x = 0.38 - dx/2, 0.38 - dx and
    # 0.38 - 1.5*dx, points spaced dx/2 vertically (middle column offset
    # by dx/4 for staggering)
    y1 = numpy.arange(dx/2, tank_height+1e-4, dx/2)
    x1 = numpy.ones_like(y1)*(0.38-dx/2)
    y2 = numpy.arange(dx/2+dx/4, tank_height+1e-4, dx/2)
    x2 = numpy.ones_like(y2)*(0.38-dx)
    y3 = numpy.arange(dx/2, tank_height+1e-4, dx/2)
    x3 = numpy.ones_like(y3)*(0.38-1.5*dx)
    x = numpy.concatenate( (x1, x2, x3) )
    y = numpy.concatenate( (y1, y2, y3) )
    h = numpy.ones_like(x) * h0
    m = numpy.ones_like(x) * 0.5 * dx/2 * dx/2 * ro
    rho = numpy.ones_like(x) * ro
    cs = numpy.ones_like(x) * co
    # presumably the gate withdrawal velocity (1.5 m/s in y) -- confirm
    v = numpy.ones_like(x) * 1.5
    gate = base.get_particle_array(cl_precision="single", name="gate",
                                   x=x, y=y, m=m, rho=rho, h=h, cs=cs,
                                   v=v,
                                   type=Solid)
    np += gate.get_number_of_particles()
    print "Number of solid particles = %d"%(np)
    return [tank, gate]
def get_fluid_particles():
# create the dam
xf1, yf1 = geom.create_2D_filled_region(x1=dx, y1=dx,
x2=0.38-2*dx,
y2=0.15,
dx=dx)
xf2, yf2 = geom.create_2D_filled_region(x1=dx/2, y1=dx/2,
x2=0.38-2*dx,
y2=0.15,
dx=dx)
# create the bed
xf3, yf3 = geom.create_2D_filled_region(x1=0.38+dx/2, y1=dx/2,
x2=tank_length-dx, y2=d,
dx=dx)
xf4, yf4 = geom.create_2D_filled_region(x1=0.38, y1=dx,
x2=tank_length-dx/2, y2=d,
dx=dx)
x = numpy.concatenate( (xf1, xf2, xf3, xf4) )
y = numpy.concatenate( (yf1, yf2, yf3, yf4) )
hf = numpy.ones_like(x) * h0
mf = numpy.ones_like(x) * dx * dx * ro * 0.5
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
rhop = numpy.ones_like(x) * ro
fluid = base.get_particle_array(cl_precision="single",
name="fluid", type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof,
cs=csf, rhop=rhop)
np = fluid.get_number_of_particles()
print "Number of fluid particles = %d"%(np)
return fluid
def get_particles(**args):
    """Return the particle arrays in solver order: fluid, tank, gate."""
    water = get_fluid_particles()
    tank, gate = get_boundary_particles()
    return [water, tank, gate]
app = solver.Application()
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# NOTE(review): `kernel` is never attached to the solver in this script.
kernel = base.CubicSplineKernel(dim=2)
# define the artificial pressure term for the momentum equation
deltap = -1/1.3
n = 4
# pilot rho: ADKE pilot density estimate written into 'rhop'
s.add_operation(solver.SPHOperation(
    sph.ADKEPilotRho.withargs(h0=h0),
    on_types=[base.Fluid], from_types=[base.Fluid, base.Solid],
    updates=['rhop'], id='adke_rho'),
)
# smoothing length update from the pilot density (adaptive h)
s.add_operation(solver.SPHOperation(
    sph.ADKESmoothingUpdate.withargs(h0=h0, k=0.7, eps=0.5, hks=False),
    on_types=[base.Fluid], updates=['h'], id='adke'),
)
#Equation of state
s.add_operation(solver.SPHOperation(
    sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
    on_types=[Fluid, Solid],
    updates=['p', 'cs'],
    id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
    sph.SPHDensityRate.withargs(hks=False),
    on_types=[Fluid, Solid], from_types=[Fluid, Solid],
    updates=['rho'], id='density')
)
#momentum equation (with the artificial pressure term deltap, n)
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False,
                                  deltap=deltap, n=n),
    on_types=[Fluid], from_types=[Fluid, Solid],
    updates=['u','v'], id='mom')
)
#s.add_operation(solver.SPHIntegration(
#    sph.SPHPressureGradient.withargs(),
#    on_types=[Fluid], from_types=[Fluid,Solid],
#    updates=['u','v'], id='pgrad')
#    )
#s.add_operation(solver.SPHIntegration(
#    sph.MonaghanArtificialVsicosity.withargs(alpha=alpha, beta=0.0),
#    on_types=[Fluid], from_types=[Fluid,Solid],
#    updates=['u','v'], id='avisc')
#    )
#Gravity force
s.add_operation(solver.SPHIntegration(
    sph.GravityForce.withargs(gy=-9.81),
    on_types=[Fluid],
    updates=['u','v'],id='gravity')
)
# Position stepping and XSPH correction operations
# NOTE(review): stepping is applied to Solid too, which is presumably
# what moves the gate with its prescribed v -- confirm.
s.add_operation(solver.SPHIntegration(
    sph.PositionStepping.withargs(),
    on_types=[base.Fluid,base.Solid],
    updates=["x","y"],
    id="step")
)
s.add_operation(solver.SPHIntegration(
    sph.XSPHCorrection.withargs(),
    on_types=[base.Fluid,], from_types=[base.Fluid,],
    updates=["x","y"],
    id="xsph")
)
dt = 1.25e-4
s.set_final_time(1.5)
s.set_time_step(dt)
app.setup(
    solver=s,
    variable_h=False, create_particles=get_particles, min_cell_size=4*h0,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator,
    domain_manager=base.DomainManagerType.DomainManager,
    cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
# this tells the solver to compute the max time step dynamically
s.time_step_function = solver.ViscousTimeStep(co=co,cfl=0.3,
                                              particles=s.particles)
app.run()
| Python |
""" An example Script to study the behavior of Monaghan type repulsive
particles (Smoothed Particle Hydrodynamics, Reports on Progresses in
Physics)
The boundary particles are an improvement over the Lenard Jones type
repulsive boundary particles. One of the main features is that a
particle moving parallel to the wall will experience the same force.
The force exerted on a boundary particle is
f = f1(x)*f2(y) nk
where f1 is a function of the component of the projection of the
vector rab onto the tangential direction and f2 is a function of the
component of the normal projection of rab.
Each boundary particle must have therefore an associated normal and
tangent.
The setup is described as Test 1 of "Boundary Conditions Generated by
Dynamic Particles in SPH Methods" by A.J.C. Crespo and
M. Gomez-Gesteria and R.A. Dalrymple, CMC, vol 5, no 3 pp 173-184
Setup:
------
o [0, 0.3]
x x x x x x x
-----
dp
o -- fluid particle
x -- boundary particles
Y
|
| Z
| /
| /
|/_______X
The fluid particle falls under the influence of gravity and interacts
with the boundary particles. When the particle `sees` the boundary
particle for the interaction of the boundary force term, a repulsion
is activated on the fluid particle.
Behavior:
---------
We study the motion of the fluid particle in this simple configuration.
From the output files, observe the motion (`x` vs `y`) of the particle.
A state space plot of Velocity (`v`) V/S Position (`y`) should ideally
be a closed loop implying the conservation of energy.
An alternative setup could be switching off gravity and imposing an
initial velocity on the particle directed towards the boundary. We can
study the ability of the method to prevent penetration by observing
the minimum distance 'y' from the wall for increasing velocities.
Parameters:
-----------
The maximum velocity is estimated as Vmax = sqrt(2*9.81*0.3) and the
numerical sound speed is taken as 10*Vmax ~ 25.0 m/s
The reference density is taken as 1.0
h = 2.097e-2
dx = dy = h/(1.3)
g = -9.81
Running:
--------
run like so:
python monaghanbc.py --freq <print-freq> --directory ./monaghanbc
"""
import logging, numpy
import sys
import pysph.solver.api as solver
import pysph.sph.api as sph
import pysph.base.api as base
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
fname = sys.argv[0][:-3]
app = solver.Application(fname=fname)
#global variables
h = 2.097e-2
dx = dy = h/(1.3)
g = -9.81
xf = numpy.array([0])
yf = numpy.array([0.3])
hf = numpy.array([h])
mf = numpy.array([1.0])
vf = numpy.array([0.0])
cf = numpy.array([25.0])
rhof = numpy.array([1.0])
fluid = base.get_particle_array(name="fluid", type=Fluid, x=xf, y=yf,
h=hf, m=mf, rho=rhof, v=vf, cs=cf)
#generate the boundary
l = base.Line(base.Point(-.5), 1.0, 0)
g = base.Geometry('line', [l], False)
g.mesh_geometry(dx)
boundary = g.get_particle_array(re_orient=True)
boundary.m[:] = 1.0
particles = base.Particles(arrays=[fluid, boundary])
app.particles = particles
kernel = base.HarmonicKernel(dim=2, n=3)
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# set the kernel as the default for the solver
s.default_kernel = kernel
#Tait equation
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(co=25.0, ro=1.0),
on_types=[Fluid],
updates=['p','cs'],
id='eos', kernel=kernel)
)
#continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(), from_types=[Fluid],
on_types=[Fluid],
updates=['rho'], id='density', kernel=kernel)
)
#momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=0.0, beta=0.0,),
on_types=[Fluid],
from_types=[Fluid],
updates=['u','v'], id='mom')
)
#gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v'],id='gravity')
)
#the boundary force
s.add_operation(solver.SPHIntegration(
sph.MonaghanBoundaryForce.withargs(delp=dx),
on_types=[Fluid], from_types=[Solid], updates=['u','v'],
id='bforce')
)
#xsph correction
s.add_operation(solver.SPHIntegration(
sph.XSPHCorrection.withargs(eps=0.1),
from_types=[Fluid],
on_types=[Fluid], updates=['x','y'], id='xsph')
)
#Position stepping
s.add_operation(solver.SPHIntegration(
sph.PositionStepping.withargs(),
on_types=[Fluid],
updates=['x','y'], id='step')
)
s.set_final_time(1)
s.set_time_step(1e-4)
app.setup(s)
app.run()
| Python |
""" A simple example in which two drops collide """
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import numpy
def get_circular_patch(name="", type=0, dx=0.05):
x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
x = x.ravel()
y = y.ravel()
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*2*dx
rho = numpy.ones_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 100.0
u = 0*x
v = 0*y
indices = []
for i in range(len(x)):
if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
indices.append(i)
pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
cs=cs,name=name, type=type)
la = base.LongArray(len(indices))
la.set_data(numpy.array(indices))
pa.remove_particles(la)
pa.set(idx=numpy.arange(len(pa.x)))
return pa
def get_particles():
f1 = get_circular_patch("fluid1")
xlow, xhigh = min(f1.x), max(f1.x)
f1.x += 1.2*(xhigh - xlow)
f1.u[:] = -1.0
f2 = get_circular_patch("fluid2")
f2.u[:] = +1.0
print "Number of particles: ", f1.get_number_of_particles() * 2.0
return [f1,f2]
app = solver.Application()
kernel = base.CubicSplineKernel(dim=2)
s = solver.FluidSolver(dim=2,
integrator_type=solver.PredictorCorrectorIntegrator)
s.set_final_time(1.0)
s.set_time_step(1e-4)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles)
app.run()
| Python |
""" NBody Example """
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
import numpy
Fluid = base.ParticleType.Fluid
# number of particles, time step and final time
np = 1024
dt = 1e-2
tf = 10.0
nsteps = tf/dt
def get_particles(**kwargs):
x = numpy.random.random(np) * 2.0 - 1.0
y = numpy.random.random(np) * 2.0 - 1.0
z = numpy.random.random(np) * 2.0 - 1.0
u = numpy.random.random(np) * 2.0 - 1.0
v = numpy.random.random(np) * 2.0 - 1.0
w = numpy.random.random(np) * 2.0 - 1.0
m = numpy.random.random(np)*100
pa = base.get_particle_array(name="test", cl_precision="single",
type=Fluid, x=x, y=y, z=z, m=m, u=u,
v=v, w=w)
return pa
app = solver.Application()
s = solver.Solver(dim=3,
integrator_type=solver.EulerIntegrator)
s.add_operation(solver.SPHIntegration(
sph.NBodyForce.withargs(),
on_types=[Fluid], from_types=[Fluid],
updates=['u','v','w'], id='nbody_force')
)
s.add_operation_step([Fluid])
app.setup(
solver=s,
variable_h=False, create_particles=get_particles,
locator_type=base.NeighborLocatorType.NSquareNeighborLocator,
cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator,
domain_manager=base.DomainManager
)
s.set_final_time(tf)
s.set_time_step(dt)
s.set_print_freq(nsteps + 1)
app.run()
| Python |
""" Shock tube problem with the ADKE procedure of Sigalotti """
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
from pysph.base.kernels import CubicSplineKernel
import numpy
Fluid = base.ParticleType.Fluid
Boundary = base.ParticleType.Boundary
# Shock tube parameters
nl = int(320 * 7.5)
nr = int(80 * 7.5)
dxl = 0.6/nl
dxr = 4*dxl
h0 = 2*dxr
eps = 0.8
k = 0.7
beta = 1.0
K = 1.0
f = 0.5
hks = False
class UpdateBoundaryParticles:
def __init__(self, particles):
self.particles = particles
def eval(self):
left = self.particles.get_named_particle_array('left')
right = self.particles.get_named_particle_array("right")
fluid = self.particles.get_named_particle_array("fluid")
left.h[:] = fluid.h[0]
right.h[:] = fluid.h[-1]
def get_fluid_particles(**kwargs):
pa = solver.shock_tube_solver.standard_shock_tube_data(
name="fluid", nl=nl, nr=nr)
pa.add_property({'name':'rhop','type':'double'})
pa.add_property({'name':'div', 'type':'double'})
pa.add_property( {'name':'q', 'type':'double'} )
return pa
def get_boundary_particles(**kwargs):
# left boundary
x = numpy.ones(50)
for i in range(50):
x[i] = -0.6 - (i+1) * dxl
m = numpy.ones_like(x) * dxl
h = numpy.ones_like(x) * 2*dxr
rho = numpy.ones_like(x)
u = numpy.zeros_like(x)
e = numpy.ones_like(x) * 2.5
p = (0.4) * rho * e
cs = numpy.sqrt( 1.4*p/rho )
left = base.get_particle_array(name="left", type=Boundary,
x=x, m=m, h=h, rho=rho, u=u,
e=e, cs=cs, p=p)
# right boundary
for i in range(50):
x[i] = 0.6 + (i + 1)*dxr
m = numpy.ones_like(x) * dxl
h = numpy.ones_like(x) * 2*dxr
rho = numpy.ones_like(x) * 0.25
u = numpy.zeros_like(x)
e = numpy.ones_like(x) * 1.795
p = (0.4) * rho * e
#cs = numpy.sqrt(0.4*e)
cs = numpy.sqrt( 1.4*p/rho )
right = base.get_particle_array(name="right", type=Boundary,
x=x, m=m, h=h, rho=rho, u=u,
e=e, cs=cs,p=p)
return [left, right]
def get_particles(**kwargs):
particles = []
particles.append(get_fluid_particles())
particles.extend(get_boundary_particles())
return particles
# Create the application
app = solver.Application()
# define the solver and kernel
#s = solver.Solver(dim=1, integrator_type=solver.RK2Integrator)
s = solver.MonaghanShockTubeSolver(dim=1, integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k,
beta=beta, K=K, f=f)
#############################################################
# ADD OPERATIONS
#############################################################
# # pilot rho
# s.add_operation(solver.SPHOperation(
# sph.ADKEPilotRho.withargs(h0=h0),
# on_types=[Fluid], from_types=[Fluid,Boundary],
# updates=['rhop'], id='adke_rho'),
# )
# # smoothing length update
# s.add_operation(solver.SPHOperation(
# sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
# on_types=[Fluid], updates=['h'], id='adke'),
# )
# # summation density
# s.add_operation(solver.SPHOperation(
# sph.SPHRho.withargs(hks=hks),
# from_types=[Fluid, Boundary], on_types=[Fluid],
# updates=['rho'], id = 'density')
# )
# # ideal gas equation
# s.add_operation(solver.SPHOperation(
# sph.IdealGasEquation.withargs(),
# on_types = [Fluid], updates=['p', 'cs'], id='eos')
# )
# # momentum equation pressure equation
# s.add_operation(solver.SPHIntegration(
# sph.SPHPressureGradient.withargs(),
# from_types=[Fluid, Boundary], on_types=[Fluid],
# updates=['u'], id='mom')
# )
# #momentum equation visc
# s.add_operation(solver.SPHIntegration(
# sph.MomentumEquationSignalBasedViscosity.withargs(beta=1.0, K=1.0),
# on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
# updates=['u'],
# id="momvisc")
# )
# # energy equation
# s.add_operation(solver.SPHIntegration(
# sph.EnergyEquationWithSignalBasedViscosity.withargs(beta=1.0, K=1.0, f=0.5),
# on_types=[Fluid], from_types=[Fluid, Boundary],
# updates=['e'],
# id='enr')
# )
# # position stepping
# s.add_operation(solver.SPHIntegration(
# sph.PositionStepping.withargs(),
# on_types=[base.Fluid],
# updates=['x'],
# id="step")
# )
s.set_final_time(0.15)
s.set_time_step(3e-4)
app.setup(
solver=s,
min_cell_size = 4*h0,
variable_h=True, create_particles=get_particles,
locator_type=base.NeighborLocatorType.SPHNeighborLocator
)
# add the boundary update function to the particles
s.particles.add_misc_function( UpdateBoundaryParticles(s.particles) )
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
beta=beta, K=K, f=f, hks=hks)
app.run()
| Python |
""" Sjogreen's test case """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as get_data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
# shock tube parameters
xl = -1.0; xr = 1.0
pl = 0.4; pr = 0.4
ul = -2.0; ur = 2.0
rhol = 1.0; rhor = 1.0
# Number of particles
nl = 400
nr = 400
np = nl + nr
# Time step constants
dt = 1e-3
tf = 0.3
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.5
k=1.0
h0 = 2.5*xr/nr
# Artificial Heat constants
g1 = 0.1
g2 = 1.0
kernel = base.CubicSplineKernel
hks=False
def get_particles(with_boundary=False, **kwargs):
adke, left, right = get_data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1, integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta,gamma=gamma,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| Python |
""" Standard shock tube problem by Monaghan """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
kernel = base.CubicSplineKernel
hks=False
# shock tube parameters
xl = -0.1; xr = 0.1
pl = 4e-7; pr = 4e-7
ul = 1.0; ur = -1.0
rhol = 1.0; rhor = 1.0
gamma = 1.4
# Number of particles
nl = 400
nr = 400
np = nl + nr
# Time step constants
dt = 1e-6
tf = 0.1
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.4
k=0.7
h0 = 1.0*xr/nr
# Artificial Heat constants
g1 = 0.5
g2 = 1.0
def get_particles(with_boundary=False, **kwargs):
adke, left, right = data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta, gamma=gamma,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| Python |
"""Woodward and COllela interacting blast wave."""
import numpy
import pysph.sph.api as sph
import pysph.base.api as base
import pysph.solver.api as solver
xl = 0
xr = 1.0
np = 5001
nbp = 100
dx = (xr-xl)/(np-1)
D = 1.5
h0 = D*dx
adke_eps = 0.5
adke_k = 1.0
g1 = 0.2
g2 = 0.4
alpha = 1.0
beta = 1.0
gamma = 1.4
tf = 0.04
dt = 2.5e-6
class UpdateBoundaryParticles(object):
def __init__(self, particles, dx):
self.particles = particles
self.dx = dx
def eval(self):
left = self.particles.get_named_particle_array("left")
right = self.particles.get_named_particle_array("right")
fluid = self.particles.get_named_particle_array("fluid")
left.h[:nbp] = fluid.h[:nbp]
right.h[-nbp:] = fluid.h[-nbp:]
left.u[:nbp] = -fluid.u[:nbp]
right.u[-nbp:] = -fluid.u[-nbp:]
left.e[:nbp] = fluid.e[:nbp]
right.e[-nbp:] = fluid.e[-nbp:]
left.p[:nbp] = fluid.p[:nbp]
right.p[-nbp:] = fluid.p[-nbp:]
left.rho[:nbp] = fluid.rho[:nbp]
right.rho[-nbp:] = fluid.rho[-nbp:]
left.cs[:nbp] = fluid.cs[:nbp]
right.cs[-nbp:] = fluid.cs[-nbp:]
left.q[:nbp] = fluid.q[:nbp]
right.q[-nbp:] = fluid.q[-nbp:]
def get_particles(**kwargs):
xleft = numpy.arange(xl, 0.1-dx+1e-10, dx)
pleft = numpy.ones_like(xleft) * 1000.0
xmid = numpy.arange(0.1+dx, 0.9-dx+1e-10, dx)
pmid = numpy.ones_like(xmid) * 0.01
xright = numpy.arange(0.9+dx, 1.0+1e-10, dx)
pright = numpy.ones_like(xright) * 100.0
x = numpy.concatenate( (xleft, xmid, xright) )
p = numpy.concatenate( (pleft, pmid, pright) )
rho = numpy.ones_like(x)
m = numpy.ones_like(x) * dx
h = numpy.ones_like(x) * D * dx
e = p/( rho*(gamma-1.0) )
cs = numpy.sqrt(gamma*p/rho)
u = numpy.zeros_like(x)
rhop = numpy.ones_like(x)
div = numpy.zeros_like(x)
q = g1 * h * cs
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x, m=m, h=h, rho=rho,
p=p, e=e, cs=cs, u=u,
rhop=rhop, div=div, q=q)
nbp = 100
x = numpy.ones(nbp)
for i in range(nbp):
x[i] = xl - (i+1)*dx
m = numpy.ones_like(x) * fluid.m[0]
p = numpy.ones_like(x) * fluid.p[0]
rho = numpy.ones_like(x) * fluid.rho[0]
h = numpy.ones_like(x) * fluid.p[0]
e = p/( (gamma-1.0)*rho )
cs = numpy.sqrt(gamma*p/rho)
div = numpy.zeros_like(x)
q = g1 * h * cs
left = base.get_particle_array(name="left", type=base.Boundary,
x=x, p=p, rho=rho, m=m, h=h,
e=e, cs=cs, div=div, q=q)
x = numpy.ones(nbp)
_xr = xr + (nbp+1)*dx
for i in range(nbp):
x[i] = _xr - i*dx
m = numpy.ones_like(x) * fluid.m[-1]
p = numpy.ones_like(x) * fluid.p[-1]
h = numpy.ones_like(x) * fluid.h[-1]
rho = numpy.ones_like(x) * fluid.rho[-1]
e = p/( (gamma-1.0)*rho )
cs = numpy.sqrt(gamma*p/rho)
div = numpy.zeros_like(x)
q = g1 * h * cs
right = base.get_particle_array(name="right", type=base.Boundary,
x=x, p=p, rho=rho, m=m, h=h,
e=e, cs=cs, div=div, q=q)
return [fluid,left,right]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=adke_eps, k=adke_k, g1=g1, g2=g2,
alpha=alpha, beta=beta,gamma=gamma)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=6*h0,
variable_h=True,
create_particles=get_particles)
# add the boundary update function
s.particles.add_misc_function( UpdateBoundaryParticles(s.particles, dx) )
app.run()
| Python |
"""1D shock tube problem which simulates the collision of two strong
shocks. The test is described in 'An adaptive SPH method for strong
shocks' by Leonardo Di. G. Sigalotti and Henri Lopez and Leonardo
Trujillo, JCP, vol 228, pp (5888-5907)
"""
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import numpy
import get_shock_tube_data as get_data
# Parameters
xl = -1.5; xr = 1.5
pl = 460.894; pr = 46.0950
ul = 19.5975; ur = -6.19633
rhol = 5.999242; rhor = 5.999242
# Number of particles
nl = 500*3
nr = 500*3
np = nl + nr
# Time step constants
dt = 5e-6
tf = 0.035
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.5
k=1.0
D = 1.5
dx = 0.5/500
h0 = D*dx
# mass
m0 = rhol*dx
# Artificial Heat constants
g1 = 0.5
g2 = 0.5
def get_particles(with_boundary=True, **kwargs):
adke, left, right = get_data.get_shock_tube_data(nl=nl,nr=nr,xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=1.4)
adke.m[:] = m0
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta)
app.run()
| Python |
""" Robert's problem """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as get_data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
# Roberts problem parameters
vc = 0.42
xl = -4.8; xr = 8.0
pl = 10.33; pr = 1.0
ul = -0.81 + vc; ur = -3.44 + vc
rhol = 3.86; rhor = 1.0
# Number of particles
nl = 7500
nr = 2500
np = nl + nr
# Time step constants
dt = 1e-4
tf = 1.5
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.1
k=1.0
h0 = 1.0*xr/nr
m = xr/nr
dxl = abs(xl)/nl
ml = rhol*dxl
# Artificial Heat constants
g1 = 0.5
g2 = 1.0
kernel = base.CubicSplineKernel
hks=False
def get_particles(with_boundary=False, **kwargs):
adke, left, right = get_data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma,
m0=m)
adke.m[:nl] = ml
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta,gamma=gamma,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| Python |
""" Functions to get the initial data for the shock tube problems """
import numpy
import pysph.base.api as base
def get_shock_tube_data(nl, nr, xl, xr,
pl, pr, rhol, rhor, ul, ur,
g1, g2, h0, gamma=1.4,
m0=None):
dxl = numpy.abs(xl)/nl
dxr = numpy.abs(xr)/nr
x = numpy.ones( nl + nr )
x[:nl] = numpy.arange( xl, -dxl+1e-10, dxl )
x[nl:] = numpy.arange( dxr, +xr+1e-10, dxr )
p = numpy.ones_like(x)
p[:nl] = pl
p[nl:] = pr
rho = numpy.ones_like(x)
rho[:nl] = rhol
rho[nl:] = rhor
u = numpy.ones_like(x)
u[:nl] = ul
u[nl:] = ur
e = p/( (gamma-1)*rho )
cs = numpy.sqrt( gamma*p/rho )
if not m0:
m = numpy.ones_like(x) * dxl
else:
m = numpy.ones_like(x) * m0
h = numpy.ones_like(x) * h0
# Extra properties for the ADKE procedure
rhop = numpy.ones_like(x)
div = numpy.ones_like(x)
q = g1 * h * cs
adke = base.get_particle_array(name="fluid", x=x, m=m, rho=rho, h=h,
u=u, p=p, e=e, cs=cs,
rhop=rhop, div=div, q=q)
nbp = 100
# left boundary
x = numpy.ones(nbp)
for i in range(nbp):
x[i] = xl - (i + 1) * dxl
if not m0:
m = numpy.ones_like(x) * dxl
else:
m = numpy.ones_like(x) * m0
h = numpy.ones_like(x) * h0
u = numpy.zeros_like(x) * ul
rho = numpy.ones_like(x) * rhol
p = numpy.ones_like(x) * pl
e = p/( (gamma-1) * rho )
cs = numpy.sqrt( gamma * p/rho )
q = h * cs * g1
left = base.get_particle_array(name="left", x=x, m=m, h=h, u=u,
type=base.Boundary,
rho=rho, p=p, e=e, cs=cs, q=q)
# right boundary
x = numpy.ones(nbp)
for i in range(nbp):
x[i] = xr + (i + 1) * dxr
if not m0:
m = numpy.ones_like(x) * dxl
else:
m = numpy.ones_like(x) * m0
h = numpy.ones_like(x) * h0
u = numpy.zeros_like(x) * ur
rho = numpy.ones_like(x) * rhor
p = numpy.ones_like(x) * pr
e = p/( (gamma-1)*rho )
cs = numpy.sqrt( gamma * p/rho )
q = h * cs * g1
right = base.get_particle_array(name="right", x=x, m=m, h=h, u=u,
type=base.Boundary,
rho=rho, p=p, e=e, cs=cs, q=q)
return adke, left, right
| Python |
""" An example script for running the shock tube problem using Standard
SPH.
Global properties for the shock tube problem:
---------------------------------------------
x ~ [-.6,.6], dxl = 0.001875, dxr = dxl*4, m = dxl, h = 2*dxr
rhol = 1.0, rhor = 0.25, el = 2.5, er = 1.795, pl = 1.0, pr = 0.1795
These are obtained from the solver.shock_tube_solver.standard_shock_tube_data
"""
import logging
import pysph.base.api as base
import pysph.solver.api as solver
from pysph.base.kernels import CubicSplineKernel
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
nl = 320
nr = 80
# Create the application, do this first so the application sets up the
# logging and also gets all command line arguments.
app = solver.Application()
# Set the solver using the default cubic spline kernel
s = solver.ShockTubeSolver(dim=1, integrator_type=solver.EulerIntegrator)
# set the default solver constants.
s.set_final_time(0.15)
s.set_time_step(3e-4)
# Set the application's solver. We do this at the end since the user
# may have asked for a different timestep/final time on the command
# line.
app.setup(
solver=s,
variable_h=False,
create_particles=solver.shock_tube_solver.standard_shock_tube_data,
name='fluid', type=0,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr, smoothing_length=None)
# Run the application.
app.run()
| Python |
""" Standard shock tube problem by Monaghan """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
kernel = base.CubicSplineKernel
hks=False
# shock tube parameters
xl = -1.0; xr = 1.0
pl = 1000; pr = 0.01
ul = 0.0; ur = 0.0
rhol = 1.0; rhor = 1.0
# Number of particles
nl = 1000
nr = 1000
np = nl + nr
# Time step constants
dt = 5e-6
tf = 0.0075
t = 0.0
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.5
k=1.0
h0 = 1.5*xr/nr
# Artificial Heat constants
g1 = 0.2
g2 = 0.4
def get_particles(with_boundary=False, **kwargs):
adke, left, right = data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| Python |
""" Cylindrical Noh's implosion problem using the ADKE algorithm.
Particles are distributed on concentric circles about the origin with
increasing number of particles with increasing radius. The velocity is
initially uniform and directed towards the origin.
"""
import numpy
import pysph.sph.api as sp
import pysph.base.api as base
import pysph.solver.api as solver
pi = numpy.pi
cos = numpy.cos
sin = numpy.sin
gamma = 5.0/3.0
alpha = 1.0
beta = 1.0
k = 0.9
eps = 0.4
g1 = 0.5
g2 = 1.0
dt = 1e-4
tf = 0.6
n = 120
dr = 1.0/n
h0 = dr
rho0 = 1.0
m1 = pi*dr*dr*rho0/4
def create_particles(**kwargs):
x = numpy.zeros(0)
y = numpy.zeros(0)
u = numpy.zeros(0)
v = numpy.zeros(0)
m = numpy.zeros(0)
rad = 0.0
for j in range(1, n+1):
npnts = 4*j
dtheta = 2*pi/npnts
theta = numpy.arange(0, 2*pi-1e-10, dtheta)
rad = rad + dr
_x = rad*cos(theta)
_y = rad*sin(theta)
_u = -cos(theta)
_v = -sin(theta)
if j == 1:
_m = numpy.ones_like(_x) * m1
else:
_m = numpy.ones_like(_x) * (2.0*j - 1.0)/(j) * m1
x = numpy.concatenate( (x, _x) )
y = numpy.concatenate( (y, _y) )
m = numpy.concatenate( (m, _m) )
u = numpy.concatenate( (u, _u) )
v = numpy.concatenate( (v, _v) )
rho = numpy.ones_like(x) * 1.0
h = numpy.ones_like(x) * h0
p = numpy.ones_like(x) * 0.0
e = numpy.ones_like(x) * 0.0
rhop = numpy.ones_like(x)
div = numpy.zeros_like(x)
q = numpy.zeros_like(x)
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x,y=y,m=m,rho=rho, h=h,
u=u,v=v,p=p,e=e,
rhop=rhop, q=q, div=div)
print "Number of fluid particles = ", fluid.get_number_of_particles()
return fluid
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=2,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta, gamma=gamma)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=create_particles)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta,
gamma=gamma, hks=app.options.hks, kernel=app.options.kernel)
app.run()
| Python |
"""Sedov point explosion problem using the ADKE algorithm.
Particles are distributed on concentric circles about the origin with
increasing number of particles with increasing radius. A unit charge
is distributed about the center which gives the initial pressure
disturbance.
"""
import numpy
import pysph.sph.api as sph
import pysph.base.api as base
import pysph.solver.api as solver
pi = numpy.pi
cos = numpy.cos
sin = numpy.sin
gamma=1.4
R = 0.3
n = 110
dr = R/n
alpha=1.0
beta=1.0
g1=1.0
g2=1.0
k=1.0
eps=0.5
h0 = 2*dr
ro = 0.025
rho0 = 1.0
m1 = pi*dr*dr*rho0/10.0
dt = 1e-4
tf = 0.05
def create_particles(**kwargs):
x = numpy.zeros(0)
y = numpy.zeros(0)
p = numpy.zeros(0)
m = numpy.zeros(0)
rad = 0.0
for j in range(1, n+1):
npnts = 10*j
dtheta = 2*pi/npnts
theta = numpy.arange(0, 2*pi-1e-10, dtheta)
rad = rad + dr
_x = rad*cos(theta)
_y = rad*sin(theta)
if j == 1:
_m = numpy.ones_like(_x) * m1
else:
_m = numpy.ones_like(_x) * (2.0*j - 1.0)/(j) * m1
if rad <= ro:
_p = numpy.ones_like(_x) * (gamma-1.0)*1.0/(pi*ro*ro)
else:
_p = numpy.ones_like(_x) * 1e-5
x = numpy.concatenate( (x, _x) )
y = numpy.concatenate( (y, _y) )
m = numpy.concatenate( (m, _m) )
p = numpy.concatenate( (p, _p) )
rho = numpy.ones_like(x) * rho0
h = numpy.ones_like(x) * h0
e = p/( (gamma-1.0)*rho0 )
rhop = numpy.ones_like(x)
div = numpy.zeros_like(x)
q = numpy.zeros_like(x)
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x,y=y,m=m,rho=rho, h=h,
p=p,e=e,
rhop=rhop, q=q, div=div)
print "Number of fluid particles = ", fluid.get_number_of_particles()
return fluid
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=2,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta, gamma=gamma)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=6*h0,
variable_h=True,
create_particles=create_particles)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta,
gamma=gamma, hks=app.options.hks, kernel=app.options.kernel)
app.run()
| Python |
""" Shock tube problem with the ADKE procedure of Sigalotti """
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
from pysph.base.kernels import CubicSplineKernel
import numpy
Fluid = base.ParticleType.Fluid
Boundary = base.ParticleType.Boundary
# Shock tube parameters
nl = int(320 * 7.5)
nr = int(80 * 7.5)
dxl = 0.6/nl
dxr = 4*dxl
h0 = 2*dxr
eps = 0.4
k = 0.7
g1 = 0.2
g2 = 0.5
alpha = 1.0
beta = 1.0
hks = False
class UpdateBoundaryParticles:
def __init__(self, particles):
self.particles = particles
def eval(self):
left = self.particles.get_named_particle_array('left')
right = self.particles.get_named_particle_array("right")
fluid = self.particles.get_named_particle_array("fluid")
left.h[:] = fluid.h[0]
right.h[:] = fluid.h[-1]
def get_fluid_particles(**kwargs):
    """Create the standard shock-tube fluid array and add the extra
    properties needed by the ADKE procedure: the pilot density ``rhop``,
    the velocity divergence ``div`` and the conduction coefficient ``q``.
    """
    fluid = solver.shock_tube_solver.standard_shock_tube_data(
        name="fluid", nl=nl, nr=nr)
    for prop in ('rhop', 'div', 'q'):
        fluid.add_property({'name': prop, 'type': 'double'})
    return fluid
def get_boundary_particles(**kwargs):
    """Create the two fixed boundary-particle pads that close the ends of
    the shock tube.

    Fifty particles are placed beyond each end of the domain
    ``[-0.6, 0.6]``: the left pad at spacing ``dxl`` with the left-state
    values (rho=1, e=2.5), the right pad at the coarser spacing ``dxr``
    with the right-state values.  Returns the list ``[left, right]``.
    """
    # left boundary: x < -0.6
    x = numpy.ones(50)
    for i in range(50):
        x[i] = -0.6 - (i+1) * dxl
    m = numpy.ones_like(x) * dxl
    h = numpy.ones_like(x) * 2*dxr
    rho = numpy.ones_like(x)
    u = numpy.zeros_like(x)
    e = numpy.ones_like(x) * 2.5
    # ideal gas with gamma = 1.4: p = (gamma - 1) * rho * e
    p = (0.4) * rho * e
    #cs = numpy.sqrt(0.4 * e)
    cs = numpy.sqrt( 1.4*p/rho )
    # conduction coefficient, matching the 'qcoeff' operation on the fluid
    q = g1 * h * cs
    left = base.get_particle_array(name="left", type=Boundary,
                                   x=x, m=m, h=h, rho=rho, u=u,
                                   e=e, cs=cs, p=p, q=q)
    # right boundary: x > 0.6.  NOTE(review): the ``x`` array is reused
    # and overwritten in place here, which assumes ``get_particle_array``
    # copied the data above — TODO confirm.
    for i in range(50):
        x[i] = 0.6 + (i + 1)*dxr
    # m = dxl is consistent with rho*dx = 0.25*dxr = dxl, since dxr = 4*dxl
    m = numpy.ones_like(x) * dxl
    h = numpy.ones_like(x) * 2*dxr
    rho = numpy.ones_like(x) * 0.25
    u = numpy.zeros_like(x)
    e = numpy.ones_like(x) * 1.795
    p = (0.4) * rho * e
    cs = numpy.sqrt( 1.4*p/rho )
    q = g1 * h * cs
    right = base.get_particle_array(name="right", type=Boundary,
                                    x=x, m=m, h=h, rho=rho, u=u,
                                    e=e, cs=cs,p=p, q=q)
    return [left, right]
def get_particles(**kwargs):
    """Return the fluid array followed by the two boundary pads."""
    return [get_fluid_particles()] + get_boundary_particles()
# Create the application
app = solver.Application()
# define the solver and kernel
s = solver.Solver(dim=1, integrator_type=solver.RK2Integrator)
#############################################################
# ADD OPERATIONS
#############################################################
# set the smoothing length
s.add_operation(solver.SPHOperation(
sph.SetSmoothingLength.withargs(h0=h0),
on_types=[base.Fluid,],
updates=["h"],
id="setsmoothing")
)
# pilot rho
s.add_operation(solver.SPHOperation(
sph.ADKEPilotRho.withargs(h0=h0),
on_types=[Fluid], from_types=[Fluid,Boundary],
updates=['rhop'], id='adke_rho'),
)
# smoothing length update
s.add_operation(solver.SPHOperation(
sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
on_types=[Fluid], updates=['h'], id='adke'),
)
# summation density
s.add_operation(solver.SPHOperation(
sph.SPHRho.withargs(hks=hks),
from_types=[Fluid, Boundary], on_types=[Fluid],
updates=['rho'], id = 'density')
)
# ideal gas equation
s.add_operation(solver.SPHOperation(
sph.IdealGasEquation.withargs(),
on_types = [Fluid], updates=['p', 'cs'], id='eos')
)
# velocity divergence
s.add_operation(solver.SPHOperation(
sph.VelocityDivergence.withargs(hks=hks),
on_types=[Fluid], from_types=[Fluid, Boundary],
updates=['div'], id='vdivergence'),
)
#conduction coefficient update
s.add_operation(solver.SPHOperation(
sph.ADKEConductionCoeffUpdate.withargs(g1=g1, g2=g2),
on_types=[Fluid],
updates=['q'], id='qcoeff'),
)
# momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=1, beta=1, hks=hks),
from_types=[Fluid, Boundary], on_types=[Fluid],
updates=['u'], id='mom')
)
# energy equation
s.add_operation(solver.SPHIntegration(
sph.EnergyEquation.withargs(),
from_types=[Fluid, Boundary],
on_types=[Fluid], updates=['e'], id='enr')
)
# artificial heat
s.add_operation(solver.SPHIntegration(
sph.ArtificialHeat.withargs(eta=0.1),
on_types=[Fluid], from_types=[Fluid,Boundary],
updates=['e'], id='aheat'),
)
# position step
s.add_operation_step([Fluid])
s.set_final_time(0.15)
s.set_time_step(3e-4)
app.setup(
solver=s,
min_cell_size = 4*h0,
variable_h=True, create_particles=get_particles,
locator_type=base.NeighborLocatorType.SPHNeighborLocator
)
# add the boundary update function to the particles
s.particles.add_misc_function( UpdateBoundaryParticles(s.particles) )
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| Python |
""" Strong blastwave problem proposed by Sigalotti. Mach number = 771 """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as data
Locator = base.NeighborLocatorType
kernel = base.CubicSplineKernel
hks=False
# shock tube parameters
xl = -1.5; xr = 1.5
pl = 1e4; pr = 0.01
ul = 0.0; ur = 0.0
rhol = 1.0; rhor = 1.0
# Number of particles
nl = 1500
nr = 1500
np = nl + nr
# Time step constants
dt = 5e-6
tf = 4e-3
t = 0.0
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 5.0/3.0
eta = 0.1
# ADKE Constants
eps = 0.8
k=1.0
dx = xr/nr
D = 1.5
h0 = D*dx
# Artificial Heat constants
g1 = 0.2
g2 = 1.0
def get_particles(with_boundary=False, **kwargs):
    """Build the particle arrays for the blast-wave run.

    Returns ``[fluid, left, right]`` when *with_boundary* is True and
    just ``[fluid]`` otherwise.
    """
    fluid, left, right = data.get_shock_tube_data(
        nl=nl, nr=nr, xl=xl, xr=xr,
        pl=pl, pr=pr,
        rhol=rhol, rhor=rhor,
        ul=ul, ur=ur,
        g1=g1, g2=g2, h0=h0,
        gamma=gamma)
    return [fluid, left, right] if with_boundary else [fluid]
# Build the application and the 1D ADKE solver for the strong blast-wave
# initial conditions defined above.
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
                               integrator_type=solver.RK2Integrator,
                               h0=h0, eps=eps, k=k, g1=g1, g2=g2,
                               alpha=alpha, beta=beta,gamma=gamma,
                               kernel=kernel, hks=hks,)
s.set_final_time(tf)
s.set_time_step(dt)
# Variable-h run; cells are kept at least 4 reference kernels wide.
app.setup(
    solver=s,
    min_cell_size=4*h0,
    variable_h=True,
    create_particles=get_particles,
    locator_type=Locator.SPHNeighborLocator)
# Save the run parameters alongside the simulation output.
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
            g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| Python |
""" An example solving the Elliptical drop test case """
import pysph.base.api as base
import pysph.solver.api as solver
import warnings
# time step and final time for the run
dt = 1e-4
tf = 0.0076
app = solver.Application()
# set the integrator type
integrator_type = solver.RK2Integrator
s = solver.FluidSolver(dim=2, integrator_type=integrator_type)
s.set_time_step(dt)
s.set_final_time(tf)
# Alternative OpenCL setup with the linked-list locator/manager, kept
# here for reference:
# app.setup(
# solver=s,
# variable_h=False,
# create_particles=solver.fluid_solver.get_circular_patch, name='fluid', type=0,
# locator_type=base.NeighborLocatorType.SPHNeighborLocator,
# cl_locator_type=base.OpenCLNeighborLocatorType.LinkedListSPHNeighborLocator,
# domain_manager_type=base.DomainManagerType.LinkedListManager)
app.setup(
    solver=s,
    variable_h=False,
    create_particles=solver.fluid_solver.get_circular_patch, name='fluid', type=0,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator,
    cl_locator_type=base.OpenCLNeighborLocatorType.RadixSortNeighborLocator,
    domain_manager_type=base.DomainManagerType.RadixSortManager)
if app.options.with_cl:
    msg = """\n\n
    You have chosen to run the example with OpenCL support. The only
    integrator with OpenCL support is the forward Euler
    integrator. This integrator will be used instead of the default
    RK2 integrator for this example.\n\n
    """
    warnings.warn(msg)
    # NOTE(review): this rebinds the local name only; the solver ``s``
    # was already constructed with RK2 above and is never updated, so
    # the Euler integrator is not actually installed — confirm and fix
    # by choosing the integrator before building the solver.
    integrator_type = solver.EulerIntegrator
# Print the output at every time step
s.set_print_freq(1)
app.run()
| Python |
"""
PySPH
=====
A general purpose Smoothed Particle Hydrodynamics framework.
This package provides a general purpose framework for SPH simulations
in Python. The framework emphasizes flexibility and efficiency while
allowing most of the user code to be written in pure Python. See here:
http://pysph.googlecode.com
for more information.
"""
from setuptools import find_packages, setup
HAS_CYTHON=True
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
cmdclass = {'build_ext': build_ext}
except ImportError:
HAS_CYTHON=False
cmdclass = {}
from numpy.distutils.extension import Extension
import numpy
import sys
import os
import platform
import multiprocessing
ncpu = multiprocessing.cpu_count()
inc_dirs = [numpy.get_include()]
extra_compile_args = []
extra_link_args = []
mpi_inc_dirs = []
mpi_compile_args = []
mpi_link_args = []
USE_CPP = True
HAS_MPI4PY = True
try:
import mpi4py
# assume a working mpi environment
import commands
if USE_CPP:
mpic = 'mpicxx'
else:
mpic = 'mpicc'
mpi_link_args.append(commands.getoutput(mpic + ' --showme:link'))
mpi_compile_args.append(commands.getoutput(mpic +' --showme:compile'))
mpi_inc_dirs.append(mpi4py.get_include())
except ImportError:
HAS_MPI4PY = False
cy_directives = {'embedsignature':True,
}
C_EXTN = 'c'
if USE_CPP:
C_EXTN = 'cpp'
# cython extension modules (subpackage directory:cython file)
extensions = {'base': ['carray.pyx',
'fast_utils.pyx',
'point.pyx',
'particle_array.pyx',
'cell.pyx',
'kernels.pyx',
'nnps.pyx',
'plane.pyx',
'polygon_array.pyx',
'geometry.pyx',
'nnps_util.pyx',
],
'sph': ['sph_func.pyx',
'sph_calc.pyx',
'kernel_correction.pyx',
],
'sph/funcs': ['basic_funcs.pyx',
'position_funcs.pyx',
'boundary_funcs.pyx',
'external_force.pyx',
'density_funcs.pyx',
'energy_funcs.pyx',
'viscosity_funcs.pyx',
'pressure_funcs.pyx',
'xsph_funcs.pyx',
'eos_funcs.pyx',
'adke_funcs.pyx',
'arithmetic_funcs.pyx',
'stress_funcs.pyx',
'linalg.pyx',
'gsph_funcs.pyx',
'euler1d.pyx',
'test_funcs.pyx',
'common.pyx',
],
'solver': ['particle_generator.pyx',
],
}
parallel_extensions = {'parallel': ['parallel_controller.pyx',
'parallel_cell.pyx',
'parallel_manager.pyx',
],
}
def gen_extensions(ext):
    """Given a dictionary mapping a subpackage name to a list of Cython
    files, return a list of Extension instances.

    Module names take the form ``pysph.<subpackage>.<basename>`` and the
    sources are looked up under ``source/pysph``.  When Cython is not
    available the pre-generated C/C++ sources are used instead.
    """
    modules = []
    # ``items`` (not the Python-2-only ``iteritems``) keeps this helper
    # usable under both Python 2 and 3.
    for subpkg, files in ext.items():
        for filename in files:
            base = os.path.splitext(filename)[0]
            module = ('pysph.%s.%s' % (subpkg, base)).replace("/", ".")
            # Use a distinct local name for the source extension — the
            # original rebound the ``ext`` parameter here.
            src_ext = 'pyx' if HAS_CYTHON else C_EXTN
            src = 'source/pysph/%s/%s.%s' % (subpkg, base, src_ext)
            modules.append(Extension(module, [src]))
    return modules
ext_modules = gen_extensions(extensions)
par_modules = gen_extensions(parallel_extensions)
if HAS_MPI4PY:
ext_modules.extend(par_modules)
for extn in ext_modules:
extn.include_dirs = inc_dirs
extn.extra_compile_args = extra_compile_args
extn.extra_link_args = extra_link_args
extn.pyrex_directives = cy_directives
if USE_CPP:
extn.language = 'c++'
for extn in par_modules:
extn.include_dirs.extend(mpi_inc_dirs)
extn.extra_compile_args.extend(mpi_compile_args)
extn.extra_link_args.extend(mpi_link_args)
if 'build_ext' in sys.argv or 'develop' in sys.argv or 'install' in sys.argv:
d = {'__file__':'source/pysph/base/generator.py'}
execfile('source/pysph/base/generator.py', d)
d['main'](None)
if HAS_CYTHON and platform.system() != "Windows":
ext_modules = cythonize(ext_modules,nthreads=ncpu,include_path=inc_dirs)
setup(name='PySPH',
version = '0.9beta',
author = 'PySPH Developers',
author_email = 'pysph-dev@googlegroups.com',
description = "A general purpose Smoothed Particle Hydrodynamics framework",
long_description = __doc__,
url = 'http://pysph.googlecode.com',
license = "BSD",
keywords = "SPH simulation computational fluid dynamics",
test_suite = "nose.collector",
packages = find_packages('source'),
package_dir = {'': 'source'},
ext_modules = ext_modules,
include_package_data = True,
cmdclass=cmdclass,
#install_requires=['mpi4py>=1.2', 'numpy>=1.0.3', 'Cython>=0.14'],
#setup_requires=['Cython>=0.14', 'setuptools>=0.6c1'],
#extras_require={'3D': 'Mayavi>=3.0'},
zip_safe = False,
entry_points = """
[console_scripts]
pysph_viewer = pysph.tools.mayavi_viewer:main
""",
platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
classifiers = [c.strip() for c in """\
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Natural Language :: English
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Physics
Topic :: Software Development :: Libraries
""".splitlines() if len(c.split()) > 0],
)
| Python |
"""A particle viewer using Mayavi.
This code uses the :py:class:`MultiprocessingClient` solver interface to
communicate with a running solver and displays the particles using
Mayavi. It can also display a list of supplied files.
"""
import sys
import math
import numpy
import socket
import os
import os.path
from enthought.traits.api import (HasTraits, Instance, on_trait_change,
List, Str, Int, Range, Float, Bool, Password, Property)
from enthought.traits.ui.api import (View, Item, Group, HSplit,
ListEditor, EnumEditor, TitleEditor, HGroup)
from enthought.mayavi.core.api import PipelineBase
from enthought.mayavi.core.ui.api import (MayaviScene, SceneEditor,
MlabSceneModel)
from enthought.pyface.timer.api import Timer, do_later
from enthought.tvtk.api import tvtk
from enthought.tvtk.array_handler import array2vtk
from pysph.base.api import ParticleArray, get_particle_array
from pysph.solver.solver_interfaces import MultiprocessingClient
from pysph.solver.utils import load
import logging
logger = logging.getLogger()
def set_arrays(dataset, particle_array):
    """Add all of the particle array's properties to *dataset*.

    The velocity components (u, v, w) are combined into one named
    'velocity' vector array; every remaining property is added as a
    named scalar array.
    """
    props = set(particle_array.properties.keys())

    # Add the vector data.
    vec = numpy.empty((len(particle_array.x), 3), dtype=float)
    vec[:, 0] = particle_array.u
    vec[:, 1] = particle_array.v
    vec[:, 2] = particle_array.w
    va = tvtk.to_tvtk(array2vtk(vec))
    va.name = 'velocity'
    # Bug fix: add the named TVTK array ``va`` — the original added the
    # raw numpy array ``vec``, which carries no name, so the named
    # velocity array was never attached.
    dataset.data.point_data.add_array(va)

    # Now add the scalar data.
    scalars = props - set(('u', 'v', 'w'))
    for sc in scalars:
        arr = particle_array.get(sc)
        sa = tvtk.to_tvtk(array2vtk(arr))
        sa.name = sc
        dataset.data.point_data.add_array(sa)
    dataset._update_data()
##############################################################################
# `ParticleArrayHelper` class.
##############################################################################
class ParticleArrayHelper(HasTraits):
"""
This class manages a particle array and sets up the necessary
plotting related information for it.
"""
# The particle array we manage.
particle_array = Instance(ParticleArray)
# The name of the particle array.
name = Str
# Current time.
time = Float(0.0)
# The active scalar to view.
scalar = Str('rho', desc='name of the active scalar to view')
# The mlab plot for this particle array.
plot = Instance(PipelineBase)
# List of available scalars in the particle array.
scalar_list = List(Str)
scene = Instance(MlabSceneModel)
# Sync'd trait with the scalar lut manager.
show_legend = Bool(False, desc='if the scalar legend is to be displayed')
# Sync'd trait with the dataset to turn on/off visibility.
visible = Bool(True, desc='if the particle array is to be displayed')
# Show the time of the simulation on screen.
show_time = Bool(False, desc='if the current time is displayed')
# Do we show the hidden arrays?
show_hidden_arrays = Bool(False,
desc='if hidden arrays are to be listed')
# Private attribute to store the Text module.
_text = Instance(PipelineBase)
########################################
# View related code.
view = View(Item(name='name',
show_label=False,
editor=TitleEditor()),
Group(
Item(name='visible'),
Item(name='show_hidden_arrays'),
Item(name='scalar',
editor=EnumEditor(name='scalar_list')
),
Item(name='show_legend'),
Item(name='show_time'),
),
)
######################################################################
# Private interface.
######################################################################
def _particle_array_changed(self, pa):
self.name = pa.name
# Setup the scalars.
self._show_hidden_arrays_changed(self.show_hidden_arrays)
# Update the plot.
x, y, z, u, v, w = pa.x, pa.y, pa.z, pa.u, pa.v, pa.w
s = getattr(pa, self.scalar)
p = self.plot
mlab = self.scene.mlab
if p is None:
src = mlab.pipeline.vector_scatter(x, y, z, u, v, w,
scalars=s)
p = mlab.pipeline.glyph(src, mode='point', scale_mode='none')
p.actor.property.point_size = 3
p.mlab_source.dataset.point_data.scalars.name = self.scalar
scm = p.module_manager.scalar_lut_manager
scm.set(show_legend=self.show_legend,
use_default_name=False,
data_name=self.scalar)
self.sync_trait('visible', p.mlab_source.m_data,
mutual=True)
self.sync_trait('show_legend', scm, mutual=True)
#set_arrays(p.mlab_source.m_data, pa)
self.plot = p
else:
if len(x) == len(p.mlab_source.x):
p.mlab_source.set(x=x, y=y, z=z, scalars=s, u=u, v=v, w=w)
else:
p.mlab_source.reset(x=x, y=y, z=z, scalars=s, u=u, v=v, w=w)
# Setup the time.
self._show_time_changed(self.show_time)
def _scalar_changed(self, value):
p = self.plot
if p is not None:
p.mlab_source.scalars = getattr(self.particle_array, value)
p.module_manager.scalar_lut_manager.data_name = value
def _show_hidden_arrays_changed(self, value):
pa = self.particle_array
sc_list = pa.properties.keys()
if value:
self.scalar_list = sorted(sc_list)
else:
self.scalar_list = sorted([x for x in sc_list
if not x.startswith('_')])
def _show_time_changed(self, value):
txt = self._text
mlab = self.scene.mlab
if value:
if txt is not None:
txt.visible = True
elif self.plot is not None:
mlab.get_engine().current_object = self.plot
txt = mlab.text(0.01, 0.01, 'Time = 0.0',
width=0.35,
color=(1,1,1))
self._text = txt
self._time_changed(self.time)
else:
if txt is not None:
txt.visible = False
def _time_changed(self, value):
txt = self._text
if txt is not None:
txt.text = 'Time = %.3e'%(value)
##############################################################################
# `MayaviViewer` class.
##############################################################################
class MayaviViewer(HasTraits):
"""
This class represents a Mayavi based viewer for the particles. They
are queried from a running solver.
"""
particle_arrays = List(Instance(ParticleArrayHelper), [])
pa_names = List(Str, [])
scene = Instance(MlabSceneModel, ())
########################################
# Traits to pull data from a live solver.
host = Str('localhost', desc='machine to connect to')
port = Int(8800, desc='port to use to connect to solver')
authkey = Password('pysph', desc='authorization key')
host_changed = Bool(True)
client = Instance(MultiprocessingClient)
controller = Property()
########################################
# Traits to view saved solver output.
files = List(Str, [])
current_file = Str('', desc='the file being viewed currently')
file_count = Range(low='_low', high='n_files', value=0,
desc='the file counter')
play = Bool(False, desc='if all files are played automatically')
loop = Bool(False, desc='if the animation is looped')
# This is len(files) - 1.
n_files = Int(-1)
_low = Int(0)
_play_count = Int(0)
########################################
# Timer traits.
timer = Instance(Timer)
interval = Range(0.5, 20.0, 2.0,
desc='frequency in seconds with which plot is updated')
########################################
# Solver info/control.
current_time = Float(0.0, desc='the current time in the simulation')
time_step = Float(0.0, desc='the time-step of the solver')
iteration = Int(0, desc='the current iteration number')
pause_solver = Bool(False, desc='if the solver should be paused')
########################################
# Movie.
record = Bool(False, desc='if PNG files are to be saved for animation')
frame_interval = Range(1, 100, 5, desc='the interval between screenshots')
movie_directory = Str
# internal counters.
_count = Int(0)
_frame_count = Int(0)
_last_time = Float
########################################
# The layout of the dialog created
view = View(HSplit(
Group(
Group(
Item(name='host'),
Item(name='port'),
Item(name='authkey'),
label='Connection',
defined_when='n_files==-1',
),
Group(
Item(name='current_file'),
Item(name='file_count'),
HGroup(Item(name='play'),
Item(name='loop'),
),
label='Saved Data',
defined_when='n_files>-1',
),
Group(
Group(
Item(name='current_time'),
Item(name='time_step'),
Item(name='iteration'),
Item(name='pause_solver',
enabled_when='n_files==-1'),
Item(name='interval',
enabled_when='n_files==-1'),
label='Solver',
),
Group(
Item(name='record'),
Item(name='frame_interval'),
Item(name='movie_directory'),
label='Movie',
),
layout='tabbed',
),
Group(
Item(name='particle_arrays',
style='custom',
show_label=False,
editor=ListEditor(use_notebook=True,
deletable=False,
page_name='.name'
)
)
),
),
Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=400, width=600, show_label=False),
),
resizable=True,
title='PySPH Particle Viewer',
height=550,
width=880
)
######################################################################
# `MayaviViewer` interface.
######################################################################
@on_trait_change('scene.activated')
def start_timer(self):
if self.n_files > -1:
# No need for the timer if we are rendering files.
return
# Just accessing the timer will start it.
t = self.timer
if not t.IsRunning():
t.Start(int(self.interval*1000))
@on_trait_change('scene.activated')
def update_plot(self):
# No need to do this if files are being used.
if self.n_files > -1:
return
# do not update if solver is paused
if self.pause_solver:
return
if self.client is None:
self.host_changed = True
return
controller = self.controller
if controller is None:
return
self.current_time = t = controller.get_t()
self.time_step = controller.get_dt()
self.iteration = controller.get_count()
for idx, name in enumerate(self.pa_names):
pa = controller.get_named_particle_array(name)
pah = self.particle_arrays[idx]
pah.set(particle_array=pa, time=t)
if self.record:
self._do_snap()
def _do_snap(self):
"""Generate the animation."""
p_arrays = self.particle_arrays
if len(p_arrays) == 0:
return
if self.current_time == self._last_time:
return
if len(self.movie_directory) == 0:
controller = self.controller
output_dir = controller.get_output_directory()
movie_dir = os.path.join(output_dir, 'movie')
self.movie_directory = movie_dir
else:
movie_dir = self.movie_directory
if not os.path.exists(movie_dir):
os.mkdir(movie_dir)
interval = self.frame_interval
count = self._count
if count%interval == 0:
fname = 'frame%06d.png'%(self._frame_count)
p_arrays[0].scene.save_png(os.path.join(movie_dir, fname))
self._frame_count += 1
self._last_time = self.current_time
self._count += 1
######################################################################
# Private interface.
######################################################################
@on_trait_change('host,port,authkey')
def _mark_reconnect(self):
self.host_changed = True
def _get_controller(self):
''' get the controller, also sets the iteration count '''
reconnect = self.host_changed
if not reconnect:
try:
c = self.client.controller
except Exception as e:
logger.info('Error: no connection or connection closed: '\
'reconnecting: %s'%e)
reconnect = True
self.client = None
else:
try:
self.client.controller.get_count()
except IOError:
self.client = None
reconnect = True
if reconnect:
self.host_changed = False
try:
if MultiprocessingClient.is_available((self.host, self.port)):
self.client = MultiprocessingClient(address=(self.host, self.port),
authkey=self.authkey)
else:
logger.info('Could not connect: Multiprocessing Interface'\
' not available on %s:%s'%(self.host,self.port))
return None
except Exception as e:
logger.info('Could not connect: check if solver is '\
'running:%s'%e)
return None
c = self.client.controller
self.iteration = c.get_count()
if self.client is None:
return None
else:
return self.client.controller
def _client_changed(self, old, new):
if self.n_files > -1:
return
if new is None:
return
else:
self.pa_names = self.client.controller.get_particle_array_names()
self.scene.mayavi_scene.children[:] = []
self.particle_arrays = [ParticleArrayHelper(scene=self.scene, name=x) for x in
self.pa_names]
# Turn on the legend for the first particle array.
if len(self.particle_arrays) > 0:
self.particle_arrays[0].set(show_legend=True, show_time=True)
def _timer_event(self):
# catch all Exceptions else timer will stop
try:
self.update_plot()
except Exception as e:
logger.info('Exception: %s caught in timer_event'%e)
def _interval_changed(self, value):
t = self.timer
if t is None:
return
if t.IsRunning():
t.Stop()
t.Start(int(value*1000))
def _timer_default(self):
return Timer(int(self.interval*1000), self._timer_event)
def _pause_solver_changed(self, value):
c = self.controller
if c is None:
return
if value:
c.pause_on_next()
else:
c.cont()
def _record_changed(self, value):
if value:
self._do_snap()
def _files_changed(self, value):
if len(value) == 0:
return
else:
d = os.path.dirname(os.path.abspath(value[0]))
self.movie_directory = os.path.join(d, 'movie')
self.n_files = len(value) - 1
self.frame_interval = 1
fc = self.file_count
self.file_count = 0
if fc == 0:
# Force an update when our original file count is 0.
self._file_count_changed(fc)
t = self.timer
if self.n_files > -1:
if t.IsRunning():
t.Stop()
else:
if not t.IsRunning():
t.Stop()
t.Start(self.interval*1000)
def _file_count_changed(self, value):
fname = self.files[value]
self.current_file = os.path.basename(fname)
# Code to read the file, create particle array and setup the helper.
data = load(fname)
solver_data = data["solver_data"]
arrays = data["arrays"]
self.current_time = t = float(solver_data['t'])
self.time_step = float(solver_data['dt'])
self.iteration = int(solver_data['count'])
names = arrays.keys()
pa_names = self.pa_names
if len(pa_names) == 0:
self.pa_names = names
pas = []
for name in names:
pa = arrays[name]
pah = ParticleArrayHelper(scene=self.scene,
name=name)
# Must set this after setting the scene.
pah.set(particle_array=pa, time=t)
pas.append(pah)
# Turn on the legend for the first particle array.
if len(pas) > 0:
pas[0].set(show_legend=True, show_time=True)
self.particle_arrays = pas
else:
for idx, name in enumerate(pa_names):
pa = arrays[name]
pah = self.particle_arrays[idx]
pah.set(particle_array=pa, time=t)
if self.record:
self._do_snap()
def _play_changed(self, value):
t = self.timer
if value:
self._play_count = 0
t.Stop()
t.callable = self._play_event
t.Start(1000*0.5)
else:
t.Stop()
t.callable = self._timer_event
def _play_event(self):
nf = self.n_files
pc = self.file_count
pc += 1
if pc > nf:
if self.loop:
pc = 0
else:
self.timer.Stop()
pc = nf
self.file_count = pc
self._play_count = pc
######################################################################
def usage():
    """Print the command-line usage message for pysph_viewer."""
    print """Usage:
pysph_viewer [-v] <trait1=value> <trait2=value> [files.npz]
If *.npz files are not supplied it will connect to a running solver, if not it
will display the given files.
The arguments <trait1=value> are optional settings like host, port and authkey
etc. The following traits are available:
host -- hostname/IP address to connect to.
port -- Port to connect to
authkey -- authorization key to use.
interval -- time interval to refresh display
pause_solver -- Set True/False, will pause running solver
movie_directory -- directory to dump movie files (automatically set if not
supplied)
record -- True/False: record movie, i.e. store screenshots of display.
play -- True/False: Play all stored data files.
loop -- True/False: Loop over data files.
Options:
--------
-h/--help prints this message.
-v sets verbose mode which will print solver connection
status failures on stdout.
Examples::
----------
$ pysph_viewer interval=10 host=localhost port=8900
$ pysph_viewer foo.npz
$ pysph_viewer *.npz play=True loop=True
"""
def error(msg):
    """Print *msg* and terminate the process (CLI error helper)."""
    print msg
    sys.exit()
def main(args=None):
    """Entry point for the pysph_viewer script.

    Parses ``trait=value`` settings and ``*.npz`` file names from the
    command line, then launches the :class:`MayaviViewer` UI.

    Parameters
    ----------
    args : list of str, optional
        Command-line arguments; defaults to ``sys.argv[1:]``.
    """
    if args is None:
        args = sys.argv[1:]

    if '-h' in args or '--help' in args:
        usage()
        sys.exit(0)

    if '-v' in args:
        logger.addHandler(logging.StreamHandler())
        logger.setLevel(logging.INFO)
        args.remove('-v')

    kw = {}
    files = []
    for arg in args:
        if '=' not in arg:
            if arg.endswith('.npz'):
                files.append(arg)
                continue
            else:
                usage()
                sys.exit(1)

        key, arg = [x.strip() for x in arg.split('=')]
        try:
            # Evaluate numbers/booleans (NameError means a plain string).
            # NOTE: eval of command-line input — acceptable for a local
            # viewer tool, but do not expose this to untrusted input.
            val = eval(arg, math.__dict__)
        except NameError:
            val = arg
        kw[key] = val

    def _file_index(fname):
        """Return the trailing _<count> integer used to order outputs."""
        base = os.path.splitext(fname)[0]
        return int(base[base.rfind('_')+1:])

    # Key-based sort works on both Python 2 and 3; the original used a
    # cmp-style comparison function, which Python 3 removed.
    files.sort(key=_file_index)

    # This hack to set n_files first is a dirty hack to work around issues with
    # setting up the UI but setting the files only after the UI is activated.
    # If we set the particle arrays before the scene is activated, the arrays
    # are not displayed on screen so we use do_later to set the files. We set
    # n_files to number of files so as to set the UI up correctly.
    m = MayaviViewer(n_files=len(files) - 1)
    do_later(m.set, files=files, **kw)
    m.configure_traits()
if __name__ == '__main__':
main()
| Python |
""" Helper functions to generate commonly used geometries.
PySPH used an axis convention as follows:
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
"""
import numpy
def create_2D_tank(x1, y1, x2, y2, dx):
    """ Generate an open rectangular tank.

    Parameters:
    -----------

    x1,y1,x2,y2 : Coordinates defining the rectangle in 2D

    dx : The spacing to use

    Returns the particle positions for the left wall, the bottom and
    the right wall (the top is left open), in that order.
    """
    # Left wall: x = x1 for y in [y1, y2].
    left_y = numpy.arange(y1, y2 + dx/2, dx)
    left_x = numpy.ones_like(left_y) * x1

    # Right wall: x = x2 for y in [y1, y2].
    right_y = numpy.arange(y1, y2 + dx/2, dx)
    right_x = numpy.ones_like(right_y) * x2

    # Bottom: y = y1, excluding the two corners already covered by the
    # walls.
    bottom_x = numpy.arange(x1 + dx, x2 - dx + dx/2, dx)
    bottom_y = numpy.ones_like(bottom_x) * y1

    x = numpy.concatenate((left_x, bottom_x, right_x))
    y = numpy.concatenate((left_y, bottom_y, right_y))
    return x, y
def create_3D_tank(x1, y1, z1, x2, y2, z2, dx):
    """ Generate an open rectangular (box shaped) tank.

    Parameters:
    -----------

    x1,y1,z1,x2,y2,z2 : Coordinates of two opposite corners of the box

    dx : The spacing to use

    The tank consists of the bottom X-Y plane at z1 and the four side
    walls (the top is left open); points shared between planes are
    de-duplicated.
    """
    def _grid(a1, a2, b1, b2):
        # Regular grid of (a, b) pairs covering [a1, a2] x [b1, b2].
        a, b = numpy.mgrid[a1:a2+dx/2:dx, b1:b2+dx/2:dx]
        return a.ravel(), b.ravel()

    points = []

    # base X-Y plane at z = z1
    gx, gy = _grid(x1, x2, y1, y2)
    points.extend(zip(gx, gy, numpy.ones_like(gx) * z1))

    # front X-Z plane at y = y1
    gx, gz = _grid(x1, x2, z1, z2)
    points.extend(zip(gx, numpy.ones_like(gx) * y1, gz))

    # Y-Z plane at x = x1
    gy, gz = _grid(y1, y2, z1, z2)
    points.extend(zip(numpy.ones_like(gy) * x1, gy, gz))

    # second X-Z plane at y = y2
    gx, gz = _grid(x1, x2, z1, z2)
    points.extend(zip(gx, numpy.ones_like(gx) * y2, gz))

    # second Y-Z plane at x = x2
    gy, gz = _grid(y1, y2, z1, z2)
    points.extend(zip(numpy.ones_like(gy) * x2, gy, gz))

    # remove the duplicated edge points
    unique = set(points)
    x = numpy.array([p[0] for p in unique])
    y = numpy.array([p[1] for p in unique])
    z = numpy.array([p[2] for p in unique])
    return x, y, z
def create_2D_filled_region(x1, y1, x2, y2, dx):
    """Return x, y positions on a regular grid of spacing *dx* filling
    the rectangle [x1, x2] x [y1, y2] (both ends inclusive)."""
    grid = numpy.mgrid[x1:x2+dx/2:dx, y1:y2+dx/2:dx]
    return grid[0].ravel(), grid[1].ravel()
def create_3D_filled_region(x1, y1, z1, x2, y2, z2, dx):
    """Return x, y, z positions on a regular grid of spacing *dx*
    filling the box [x1, x2] x [y1, y2] x [z1, z2] (ends inclusive)."""
    grid = numpy.mgrid[x1:x2+dx/2:dx, y1:y2+dx/2:dx, z1:z2+dx/2:dx]
    return grid[0].ravel(), grid[1].ravel(), grid[2].ravel()
| Python |
''' convert pysph .npz output to vtk file format '''
import os
import re
from enthought.tvtk.api import tvtk, write_data
from numpy import array, c_, ravel, load, zeros_like
def write_vtk(data, filename, scalars=None, vectors=None, tensors=None,
              coords=('x','y','z'), dims=None, **kwargs):
    ''' write data in to vtk file

    Parameters
    ----------
    data : dict
        mapping of variable name to their numpy array
    filename : str
        the file to write to (can be any recognized vtk extension)
        if extension is missing .vts extension is appended
    scalars : list
        list of arrays to write as scalars (defaults to data.keys())
    vectors : dict
        mapping of vector name to vector component names to take from data
        (default: a 'V' vector built from ('u','v','w'))
    tensors : dict
        mapping of tensor name to tensor component names to take from data
        (default: none)
    coords : list
        the name of coordinate data arrays (default=('x','y','z'))
    dims : 3 tuple
        the size along the dimensions for (None means x.shape)
    **kwargs : extra arguments for the file writer
        example file_type=binary/ascii
    '''
    # Fill in the documented defaults here rather than using mutable
    # default arguments (shared across calls).
    if vectors is None:
        vectors = {'V': ('u', 'v', 'w')}
    if tensors is None:
        tensors = {}

    x = data[coords[0]]
    y = data.get(coords[1], zeros_like(x))
    z = data.get(coords[2], zeros_like(x))

    if dims is None:
        dims = array([1, 1, 1])
        dims[:x.ndim] = x.shape
    else:
        dims = array(dims)

    sg = tvtk.StructuredGrid(points=c_[x.flat, y.flat, z.flat],
                             dimensions=array(dims))
    pd = tvtk.PointData()

    if scalars is None:
        scalars = [i for i in data.keys() if i not in coords]
    for v in scalars:
        pd.scalars = ravel(data[v])
        pd.scalars.name = v
        sg.point_data.add_array(pd.scalars)

    # ``items`` instead of the Python-2-only ``iteritems``.
    for vec, vec_vars in vectors.items():
        u, vv, w = [data[i] for i in vec_vars]
        pd.vectors = c_[ravel(u), ravel(vv), ravel(w)]
        pd.vectors.name = vec
        sg.point_data.add_array(pd.vectors)

    for ten, ten_vars in tensors.items():
        comps = [data[i] for i in ten_vars]
        # Use distinct local names: the original rebound the ``tensors``
        # argument while iterating over it and shadowed the ``vars``
        # builtin.
        ten_data = c_[[ravel(i) for i in comps]].T
        pd.tensors = ten_data
        pd.tensors.name = ten
        sg.point_data.add_array(pd.tensors)

    write_data(sg, filename, **kwargs)
def detect_vectors_tensors(keys):
    ''' detect the vectors and tensors from given array names

    Vectors are identified as the arrays with common prefix followed by
    0,1 and 2 in their names
    Tensors are identified as the arrays with common prefix followed by
    two character codes representing ij indices
    (00,01,02,11,12,22) for a symmetric tensor
    (00,01,02,10,11,12,20,21,22) for a tensor
    Arrays not belonging to vectors or tensors are returned as scalars

    Returns scalars,vectors,tensors in a format suitable to be used as arguments
    for :py:func:`write_vtk`
    '''
    # group the names by length: the components of one vector/tensor share
    # a prefix plus a fixed-width suffix, hence have equal length
    d = {}
    for k in keys:
        d.setdefault(len(k), []).append(k)
    scalars = []
    vectors = {}
    tensors = {}
    # .items() instead of .iteritems() keeps this working on Python 3 too
    for n, l in d.items():
        if n < 2:
            # too short to carry a component suffix
            continue
        l.sort()
        idx = -1
        while idx < len(l)-1:
            idx += 1
            k = l[idx]
            # check if last char is 0
            if k[-1] == '0':
                # check for tensor
                if k[-2] == '0':
                    # check for 9 tensor
                    ten = []
                    for i in range(3):
                        for j in range(3):
                            ten.append(k[:-2]+str(j)+str(i))
                    ten.sort()
                    if l[idx:idx+9] == ten:
                        tensors[k[:-2]] = ten
                        idx += 8
                        continue
                    # check for symm 6 tensor
                    ten2 = []
                    for i in range(3):
                        for j in range(i+1):
                            ten2.append(k[:-2]+str(j)+str(i))
                    ten2.sort()
                    if l[idx:idx+6] == ten2:
                        # expand the 6 stored components into the full 3x3
                        # ordering expected by the vtk writer
                        ten = []
                        for i in range(3):
                            for j in range(3):
                                ten.append(k[:-2]+str(min(i,j))+str(max(i,j)))
                        tensors[k[:-2]] = ten
                        idx += 5
                        continue
                # check for vector
                vec = []
                for i in range(3):
                    vec.append(k[:-1] + str(i))
                if l[idx:idx+3] == vec:
                    vectors[k[:-1]] = vec
                    idx += 2
                    continue
            scalars.append(k)
    return scalars, vectors, tensors
def get_output_details(path):
    """Scan *path* for pysph output files named
    ``<solver>_<rank>_<entity>_<time>.npz`` and summarize what is present.

    Returns a dict ``{solver_name: [ranks, entities, times]}`` where each
    element of the list is a set of the strings found in the file names.
    """
    if not os.path.isdir(path):
        path = os.path.dirname(path)
    pattern = re.compile(r'(?P<solver>.+)_(?P<rank>\d+)_(?P<entity>.+)_(?P<time>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?).npz')
    solvers = {}
    for fname in sorted(os.listdir(path)):
        m = pattern.match(fname)
        if m is None:
            continue
        info = m.groupdict()
        entry = solvers.setdefault(info['solver'], [set([]), set([]), set([])])
        entry[0].add(info['rank'])
        entry[1].add(info['entity'])
        entry[2].add(info['time'])
    # {solver: [ranks, entities, times]}
    return solvers
def pysph_to_vtk(path, merge_procs=False, skip_existing=True, binary=True):
''' convert pysph output .npz files into vtk format
Parameters
----------
path : str
directory where .npz files are located
merge_procs : bool
whether to merge the data from different procs into a single file
(not yet implemented)
skip_existing : bool
skip files where corresponding vtk already exist
this is useful if you've converted vtk files while a solver is running
only want to convert the newly added files
binary : bool
whether to use binary format in vtk file
The output vtk files are stored in a directory `solver_name` _vtk within
the `path` directory
'''
if binary:
data_mode = 'binary'
else:
data_mode = 'ascii'
if merge_procs is True:
# FIXME: implement
raise NotImplementedError, 'merge_procs=True not implemented yet'
solvers = get_output_details(path)
for solver, (procs, entities, times) in solvers.iteritems():
print 'converting solver:', solver
dir = os.path.join(path,solver+'_vtk')
if not os.path.exists(dir):
os.mkdir(dir)
procs = sorted(procs)
entities = sorted(entities)
times = sorted(times, key=float)
times_file = open(os.path.join(dir,'times'), 'w')
for entity in entities:
print ' entity:', entity
for proc in procs:
print ' proc:', proc
print ' timesteps:', len(times)
f = '%s_%s_%s_'%(solver,proc,entity)
of = os.path.join(dir,f)
for i, time in enumerate(times):
print '\r',i,
if skip_existing and os.path.exists(f+str(i)):
continue
d = load(os.path.join(path, f+time+'.npz'))
arrs = {}
for nam,val in d.iteritems():
if val.ndim > 0:
arrs[nam] = val
d.close()
scalars, vectors, tensors = detect_vectors_tensors(arrs)
vectors['V'] = ['u','v','w']
z = zeros_like(arrs['x'])
if 'v' not in arrs:
arrs['v'] = z
if 'w' not in arrs:
arrs['w'] = z
write_vtk(arrs, of+str(i),
scalars=scalars, vectors=vectors, tensors=tensors,
data_mode=data_mode)
times_file.write('%d\t%s\n'%(i,time))
times_file.close()
def extract_text(path, particle_idx, props=['x','y','u','v','p','rho','sigma00','sigma01','sigma11'], ent=None, solvers=None):
if solvers:
raise NotImplementedError
else:
solvers = get_output_details(path)
for solver, (procs, entities, times) in solvers.iteritems():
print 'converting solver:', solver
dir = os.path.join(path,solver+'_vtk')
if not os.path.exists(dir):
os.mkdir(dir)
procs = sorted(procs)
entities = sorted(entities)
times = sorted(times, key=float)
times_file = open(os.path.join(dir,'times'), 'w')
e = ent
if ent is None:
e = entities
for entity in entities:
if entity not in e:
continue
print ' entity:', entity
for proc in procs:
print ' proc:', proc
print ' timesteps:', len(times)
f = '%s_%s_%s_'%(solver,proc,entity)
of = os.path.join(dir,f)
files = [open(os.path.join(path,f+'%d.dat'%particle_id), 'w') for particle_id in particle_idx]
print files
for file in files:
file.write('i\tt\t'+'\t'.join(props))
for i, time in enumerate(times):
print '\r',i,
d = load(os.path.join(path, f+time+'.npz'))
s = '\n%d\t%s'%(i,time)
for j,file in enumerate(files):
file.write(s)
for prop in props:
file.write('\t')
file.write(str(d[prop][particle_idx[j]]))
d.close()
for file in files:
file.close()
def test():
    """Sanity check: detect_vectors_tensors must classify a mix of
    scalar, vector, symmetric-tensor and full-tensor array names."""
    names = ['x%d' % i for i in range(3)]
    names.append('a0')
    names.append('a1')
    for i in range(3):
        for j in range(3):
            if i == j:
                names.append('XX%d' % i)
            if i <= j:
                names.append('S%d%d' % (i, j))
            names.append('T%d%d' % (i, j))
    scalars, vectors, tensors = detect_vectors_tensors(names)
    assert set(scalars) == set(['a0', 'a1'])
    assert set(vectors) == set(['x', 'XX'])
    assert set(tensors) == set(['S', 'T'])
if __name__ == '__main__':
    # Command-line usage: python <script> <output-directory>
    import sys
    pysph_to_vtk(path=sys.argv[1])
| Python |
""" Module to implement various space filling curves for load balancing """
import numpy
from pysph.base.point import IntPoint
try:
from hilbert import Hilbert_to_int
have_hilbert = True
except ImportError:
# TODO: implement Hilbert's SFC
have_hilbert = False
def morton_sfc(cell_id, maxlen=20, dim=3):
    """Returns key of indices using Morton's space filling curve

    Parameters
    ----------
    cell_id : IntPoint or indexable of integer indices
        cell index; only the first `dim` components are used
    maxlen : int
        bits per coordinate; each index is offset by 2**maxlen, so the
        indices must lie in (-2**maxlen, 2**maxlen)
    dim : int
        number of dimensions to interleave (1, 2 or 3)
    """
    if isinstance(cell_id, IntPoint):
        cell_id = (cell_id.x, cell_id.y, cell_id.z)
    cell_id = cell_id[:dim]
    binary_repr = numpy.binary_repr
    # offset by 2**maxlen so negative indices become non-negative
    s = 2**maxlen
    binr = [binary_repr(i+s) for i in cell_id]
    bins = []
    for bin in binr:
        # BUG FIX: pad to maxlen+1 characters, not maxlen -- the
        # interleaving loop below reads bit positions 0..maxlen, and a
        # negative coordinate yields a shorter representation which
        # previously caused an IndexError.
        if len(bin) < maxlen+1:
            bin = '0'*(maxlen+1-len(bin)) + bin
        bins.append(bin)
    # interleave the bits of all coordinates, most significant first
    key = 0
    for i in range(maxlen+1):
        for bin in bins:
            key = 2*key + (bin[i] == '1')
    return key
def hilbert_sfc(cell_id, maxlen=20, dim=3):
    """Returns key of indices using Hilbert space filling curve

    cell_id : IntPoint or indexable of integer indices; only the first
        `dim` components are used
    maxlen : bits per coordinate; indices are offset by 2**maxlen so they
        are non-negative when handed to Hilbert_to_int
    dim : number of dimensions (1, 2 or 3)
    """
    if isinstance(cell_id, IntPoint):
        cell_id = (cell_id.x,cell_id.y,cell_id.z)
    cell_id = cell_id[:dim]
    # shift by 2**maxlen so Hilbert_to_int only sees non-negative ints
    s = 2**maxlen
    return Hilbert_to_int([int(i+s) for i in cell_id])
# Registry of available space-filling-curve key functions, keyed by name.
# 'hilbert' is registered only when the optional hilbert module imported.
sfc_func_dict = {'morton':morton_sfc}
if have_hilbert:
    sfc_func_dict['hilbert'] = hilbert_sfc
| Python |
from parallel_manager import ParallelManager
from parallel_controller import ParallelController
from pysph.base.particle_array import get_local_real_tag, get_dummy_tag
from pysph.base.fast_utils import arange_long
# logger imports
import logging
logger = logging.getLogger()
# Constants
# Particle tag values (from pysph.base.particle_array): Dummy marks remote
# copies of particles, LocalReal marks locally-owned real particles.
Dummy = get_dummy_tag()
LocalReal = get_local_real_tag()
class SimpleParallelManager(ParallelManager):
    """This is a very simple parallel manager. It simply broadcasts all the
    particles. Each machine has exactly the same particles for all time.

    There is no support currently for dynamically changing the particles but
    that should be trivial to add.
    """
    def __init__(self, parallel_controller=None):
        """Set up MPI communication state.

        Parameters
        ----------
        parallel_controller : ParallelController, optional
            created with default settings when not given.
        """
        if parallel_controller is None:
            parallel_controller = ParallelController()
        self.parallel_controller = parallel_controller
        # cache the communicator and this process' place in it
        self.comm = parallel_controller.comm
        self.size = self.parallel_controller.num_procs
        self.rank = self.parallel_controller.rank

    def initialize(self, particles):
        """Initialize the parallel manager with the `Particles`.
        """
        self.particles = particles

    def update(self):
        """Update particles. This method simply partitions the particles
        equally among the processors.
        """
        # NOTE(review): despite the docstring, the visible implementation
        # replicates every array on every processor (gather + bcast below),
        # marking non-local copies as Dummy, rather than partitioning.
        logger.debug("SimpleParallelManager.update()")
        comm = self.comm
        rank = self.rank
        size = self.size
        local_data = self.particles.arrays
        # Remove remotes from the local.
        for arr in local_data:
            remove = arange_long(arr.num_real_particles, arr.get_number_of_particles())
            arr.remove_particles(remove)
            # everybody sets the pid for their local arrays
            arr.set_pid(rank)
        comm.Barrier()
        # Collect all the local arrays and then broadcast them.
        data = comm.gather(local_data, root=0)
        data = comm.bcast(data, root=0)
        # Now set the remote data's tags to Dummy and add the arrays to
        # the local.
        for i in range(size):
            if i != rank:
                for j, arr in enumerate(data[i]):
                    tag = arr.get_carray('tag')
                    tag.get_npy_array()[:] = Dummy
                    #local = arr.get_carray('local')
                    #local.get_npy_array()[:] = 0
                    local_data[j].append_parray(arr)
        return

    def update_remote_particle_properties(self, props):
        """Update only the remote particle properties.

        This is typically called when particles don't move but only some of
        their properties have changed.

        Parameters
        ----------
        props : unused here; a full update is performed regardless.
        """
        logger.debug("SimpleParallelManager.update_remote_particle_properties()")
        # Just call update.
        self.update()
| Python |
"""
Module to implement parallel decomposition of particles to assign to
different processes during parallel simulations. The method used is an
extension of k-means clustering algorithm
"""
# logging imports
import logging
logger = logging.getLogger()
# standard imports
import numpy
# local imports
from pysph.base.cell import py_construct_immediate_neighbor_list
from load_balancer import LoadBalancer
from load_balancer_sfc import LoadBalancerSFC
class Cluster():
    """Class representing a cluster in k-means clustering"""
    def __init__(self, cells, cell_np, np_req, **kwargs):
        """constructor

        Parameters
        ----------
        cells : list of cell ids assigned to this cluster; each id is
            expected to have x, y, z attributes
        cell_np : dict mapping cell id -> number of particles in that cell
        np_req : number of particles this cluster should ideally hold

        kwargs can be used to finetune the algorithm:
        t = ratio of old component of center used in the center calculation
        tr = `t` when the number of particles over/undershoot (reversal)
        u = ratio of nearest cell center in the new center from the remaining
            (1-t) (other component is the centroid) of cells
        e = reciprocal of the exponent of
            (required particles)/(actual particles) used to
            resize the cluster
        er = `e` on reversal (see `tr`)
        r = clipping of resize factor between (1/r and r)
        """
        self.cells = cells
        self.cell_np = cell_np
        # change in particle count since the last calc()
        self.dnp = 0
        # current particle count
        self.np = 0
        # last resize factor and current cluster size
        self.dsize = 0.0
        self.size = 1.0
        self.np_req = np_req
        # ratio of old component
        self.tr = kwargs.get('tr',0.8)
        # ratio of nearest cell in the new component (other is the centroid)
        self.u = kwargs.get('u',0.4)
        # exponent for resizing
        self.e = kwargs.get('e',3.0)
        self.er = kwargs.get('er',6.0)
        self.r = kwargs.get('r',2.0)
        # there's no previous center hence it shouldn't come into calculation
        self.t = 0.0
        self.x = self.y = self.z = 0.0
        np = 0
        for cell in self.cells:
            n = self.cell_np[cell]
            np += n
            self.x += (cell.x)#*n
            self.y += (cell.y)#*n
            self.z += (cell.z)#*n
        self.np = np
        np = float(len(self.cells))
        # centroid of the member cells (unweighted by particle counts)
        self.x, self.y, self.z = self.x/np,self.y/np,self.z/np
        self.center = numpy.array([self.x, self.y, self.z])
        self.dcenter = self.center*0
        # so that initial setting is not way off
        self.move()
        # set the value of t
        self.t = kwargs.get('t',0.2)

    def calc(self):
        """calculate the number of particles and the change in the number of
        particles (after a reallocation of cells)"""
        np = 0
        for cell in self.cells:
            n = self.cell_np[cell]
            np += n
        self.dnp = np - self.np
        self.np = np

    def move(self):
        """move the center depending on the centroid of cells (A),
        the nearest cell to the centroid (B) and the old center(C)

        formula: new center = (1-t)(1-u)A + (1-t)uB + tC
        t = tr on reversal (overshoot/undershoot of particles)"""
        x = y = z = 0.0
        for cell in self.cells:
            x += (cell.x)#*n
            y += (cell.y)#*n
            z += (cell.z)#*n
        np = float(len(self.cells))
        med = numpy.array([x/np,y/np,z/np])
        # distance of each member cell from the current center; the
        # nearest such cell supplies component B of the update formula
        dists = []
        for cell in self.cells:
            d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2
            d = numpy.sqrt(d)
            dists.append(d)
            #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2
            #dists[-1] = (dists[-1]+md)/2
        cell = self.cells[numpy.argmin(dists)]
        cc = numpy.array([cell.x, cell.y, cell.z])
        t = self.t
        # true when the cluster is over target (np > np_req) and the count
        # changed in the last reallocation -> use the reversal ratio tr
        if abs(self.dnp) * ( self.np-self.np_req) > 0:
            t = self.tr
        self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))
        self.x,self.y,self.z = self.center = self.center + self.dcenter

    def resize(self):
        """resize the cluster depending on the number of particles and the
        required number of particles

        formula: new size = (old_size)*(np_req/np)**(1/e),
        clipped between r and 1/r"""
        e = self.e
        # over target and still changing -> use the reversal exponent er
        if abs(self.dnp) * ( self.np-self.np_req) > 0:
            e = self.er
        self.dsize = numpy.clip((self.np_req/self.np)**(1./e), 1/self.r, self.r)
        self.size *= self.dsize
class ParDecompose:
    """Partition of cells for parallel solvers"""
    def __init__(self, cell_proc, proc_cell_np, init=True, **kwargs):
        """constructor

        Parameters
        ----------
        cell_proc : mapping of block/cell id -> owning process
        proc_cell_np : list with one dict per process mapping
            block/cell id -> number of particles
        init : when True (default), generate the clusters immediately

        kwargs can be used to finetune the algorithm:
        c = (0.3) the ratio of euler distance contribution in calculating the
            distance of particle from cluster center
            (the other component is scaled distance based on cluster size)
        t = (0.2) ratio of old component of center in the center calculation
        tr = (0.8) `t` when the number of particles over/undershoot (reversal)
        u = (0.4) ratio of nearest cell center in the new center from the
            remaining (1-t) (other component is the centroid) of cells
        e = (3) reciprocal of the exponent of
            (required particles)/(actual particles) used to
            resize the cluster
        er = (6) `e` on reversal (see `tr`)
        r = (2) clipping of resize factor between (1/r and r)
        """
        self.block_proc = cell_proc
        self.proc_block_np = proc_cell_np
        self.num_procs = len(proc_cell_np)
        self.c = kwargs.get('c', 0.3)
        if init:
            self.gen_clusters(**kwargs)

    def clusters_allocate_cells(self):
        """allocate the cells in the cell manager to clusters based on their
        "weighted distance" from the center of the cluster"""
        # empty each cluster in place (the Cluster objects keep the lists)
        for cluster in self.clusters:
            cluster.cells[:] = []
        for cell in self.block_proc:
            wdists = []
            for cluster in self.clusters:
                s = cluster.size
                d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +
                     (cell.z-cluster.z)**2 )
                d = numpy.sqrt(d)
                c = self.c
                # TODO: choose a better distance function below
                r = d*(c+(1-c)*numpy.exp(-s/d))
                r = numpy.clip(r,0,r)
                wdists.append(r)
            # assign the cell to the cluster with the smallest weighted distance
            self.clusters[numpy.argmin(wdists)].cells.append(cell)

    def get_distribution(self):
        """return the list of cells and the number of particles in each
        cluster to be used for distribution to processes"""
        self.calc()
        proc_blocks = self.proc_blocks
        proc_num_particles = self.particle_loads
        cell_proc = LoadBalancer.get_block_proc(proc_blocks=proc_blocks)
        return cell_proc, proc_num_particles

    def cluster_bal_iter(self):
        """perform a single iteration of balancing the clusters

        **algorithm**

        # move the cluster center based on their cells
        # allocate cells to clusters based on new centers
        # resize the clusters based on the number of particles
        # allocate cells to clusters based on new sizes
        """
        # moving
        for j,cluster in enumerate(self.clusters):
            cluster.move()
        self.clusters_allocate_cells()
        for j,cluster in enumerate(self.clusters):
            cluster.calc()
            #print j, '\t', cluster.center, '\t', cluster.np, '\t', cluster.size
        # resizing
        for j,cluster in enumerate(self.clusters):
            cluster.resize()
        self.clusters_allocate_cells()
        for j,cluster in enumerate(self.clusters):
            cluster.calc()
            #print j, '\t', cluster.center, '\t', cluster.np, '\t', cluster.size
        self.calc()

    def calc(self):
        """calculates the cells in each process, the cell and particle loads
        and the imbalance in the distribution"""
        self.proc_blocks = [cluster.cells for cluster in self.clusters]
        # NOTE(review): this builds a single-element list holding the
        # *total* cell count; a per-process list would be
        # [len(cells) for cells in self.proc_blocks] -- confirm intent.
        self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]
        self.particle_loads = [cluster.np for cluster in self.clusters]
        self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)

    def gen_clusters(self, proc_cells=None, proc_num_particles=None, **kwargs):
        """generate the clusters to operate on. This is automatically called
        by the constructor if its `init` argument is True (default)"""
        # merge the per-process block->np dicts into one mapping
        cell_np = {}
        for tmp_cells_np in self.proc_block_np:
            cell_np.update(tmp_cells_np)
        self.cell_np = cell_np
        if proc_cells is None:
            proc_cells, proc_num_particles = LoadBalancer.distribute_particles_geometric(
                self.cell_np, self.num_procs)
        # target particle count per cluster
        self.np_req = numpy.average(proc_num_particles)
        self.clusters = [Cluster(cells, cell_np, self.np_req, **kwargs)
                         for cells in proc_cells]
        self.calc()
def distribute_particles(cm, num_procs, max_iter=200, n=5, **kwargs):
    """ distribute particles according to the modified k-means clustering
    algorithm implemented by the `ParDecompose` class

    The algorithm runs maximum `max_iter` iterations.
    The solution is assumed converged if the particle distribution is same
    in `n+k` steps out of `n+2k` latest steps

    See :class:`ParDecompose` for the fine-tuning parameters kwargs"""
    # NOTE(review): ParDecompose.__init__ expects (cell_proc, proc_cell_np),
    # i.e. a mapping and a per-process list of dicts; an integer num_procs
    # would fail inside ParDecompose (len(proc_cell_np)). Confirm what the
    # callers actually pass here.
    pd = ParDecompose(cm, num_procs, **kwargs)
    pd.calc()
    proc_num_particles = pd.particle_loads
    conv = 0
    for t in range(max_iter):
        pd.cluster_bal_iter()
        pd.calc()
        #print t
        proc_num_particlesold = proc_num_particles
        proc_num_particles = pd.particle_loads
        imbal = pd.imbalance
        logger.debug('imbalance %g' %imbal)
        # net count of iterations with an unchanged distribution; converged
        # once it exceeds n
        if proc_num_particlesold == proc_num_particles:
            conv += 1
            logger.debug('converged in %d iterations' %t)
            if conv > n:
                break
        else:
            conv -= 1
            if conv < 0: conv = 0
    return pd.get_distribution()
###############################################################################
# `LoadBalancerMKMeans` class.
###############################################################################
class LoadBalancerMKMeans(LoadBalancerSFC):
    """Load balancer using the modified k-means clustering ('mkmeans')
    particle decomposition implemented by :class:`ParDecompose`."""

    def __init__(self, **args):
        LoadBalancerSFC.__init__(self, **args)
        # default method dispatched by LoadBalancer.load_balance
        self.method = 'serial_mkmeans'
        # keep the constructor kwargs so the ParDecompose fine-tuning
        # parameters can be forwarded at redistribution time
        self.args = args

    def load_balance_func_serial_mkmeans(self, **args):
        """Serial load-balance entry point using the mkmeans method."""
        self.load_balance_func_serial('mkmeans', **args)

    def load_redistr_mkmeans(self, cell_proc=None, proc_cell_np=None, max_iter=None, n=3, **args):
        """ distribute particles according to the modified k-means clustering
        algorithm implemented by the `ParDecompose` class

        The algorithm runs maximum `max_iter` iterations and is assumed
        converged once the particle distribution stays unchanged for more
        than `n` (net) iterations.

        See :class:`ParDecompose` for the fine-tuning parameters kwargs"""
        # merge constructor-supplied tuning parameters with call-time ones;
        # call-time arguments take precedence
        args2 = {}
        args2.update(self.args)
        args2.update(args)
        if max_iter is None:
            max_iter = self.lb_max_iterations
        # BUG FIX: the merged kwargs (args2) were built but the raw `args`
        # were passed, silently dropping the constructor's tuning options.
        pd = ParDecompose(cell_proc, proc_cell_np, **args2)
        pd.calc()
        proc_num_particles = pd.particle_loads
        conv = 0
        for t in range(max_iter):
            pd.cluster_bal_iter()
            pd.calc()
            proc_num_particlesold = proc_num_particles
            proc_num_particles = pd.particle_loads
            imbal = pd.imbalance
            logger.debug('imbalance %g' %imbal)
            # net count of unchanged distributions; converged above n
            if proc_num_particlesold == proc_num_particles:
                conv += 1
                logger.debug('converged in %d iterations' %t)
                if conv > n:
                    logger.debug('mkm converged in %d iterations' %t)
                    break
            else:
                conv -= 1
                if conv < 0: conv = 0
        return pd.get_distribution()
| Python |
"""
Contains class to perform load balancing using space filling curves.
"""
# logging imports
import logging
logger = logging.getLogger()
# standard imports
import numpy
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.base.cell import py_construct_immediate_neighbor_list
from load_balancer import LoadBalancer
import space_filling_curves
###############################################################################
# `LoadBalancerSFC` class.
###############################################################################
class LoadBalancerSFC(LoadBalancer):
    """Load balancer that orders blocks along a space filling curve and
    cuts the curve into contiguous runs of roughly equal particle count."""
    def __init__(self, sfc_func_name='morton', sfc_func_dict=None, **args):
        """Constructor.

        Parameters
        ----------
        sfc_func_name : name of the curve to use (e.g. 'morton', or
            'hilbert' when available)
        sfc_func_dict : mapping of curve name -> key function; defaults to
            space_filling_curves.sfc_func_dict
        """
        LoadBalancer.__init__(self, **args)
        self.method = 'serial_sfc'
        if sfc_func_dict is None:
            sfc_func_dict = space_filling_curves.sfc_func_dict
        self.sfc_func_dict = sfc_func_dict
        self.sfc_func = sfc_func_name

    def load_balance_func_serial_sfc(self, sfc_func_name=None, **args):
        """ serial load balance function which uses SFCs

        calls the :class:Loadbalancer :meth:load_balance_func_serial
        setting the appropriate sfc function
        """
        if sfc_func_name is None:
            sfc_func_name = self.sfc_func
        sfc_func = self.sfc_func_dict[sfc_func_name]
        self.load_balance_func_serial('sfc', sfc_func=sfc_func, **args)

    def load_redistr_sfc(self, cell_proc, proc_cell_np, sfc_func=None, **args):
        """ function to redistribute the cells amongst processes using SFCs

        This is called by :class:Loadbalancer :meth:load_balance_func_serial

        Parameters
        ----------
        cell_proc : mapping of cell id -> owning process; updated in place
            and returned
        proc_cell_np : list (one per process) of dicts mapping
            cell id -> particle count
        sfc_func : curve key function, or its name in sfc_func_dict;
            defaults to the curve chosen at construction
        """
        if isinstance(sfc_func, str):
            sfc_func = self.sfc_func_dict[sfc_func]
        if sfc_func is None:
            sfc_func = self.sfc_func_dict[self.sfc_func]
        num_procs = len(proc_cell_np)
        num_cells = len(cell_proc)
        # collect the integer cell coordinates to detect dimensionality
        cell_arr = numpy.empty((num_cells, 3))
        for i,cell_id in enumerate(cell_proc):
            cell_arr[i,0] = cell_id.x
            cell_arr[i,1] = cell_id.y
            cell_arr[i,2] = cell_id.z
        dim = 3
        if min(cell_arr[:,2])==max(cell_arr[:,2]):
            dim = 2
            if min(cell_arr[:,1])==max(cell_arr[:,1]):
                dim = 1
        # target number of particles per process
        np_per_proc = sum(self.particles_per_proc)/float(self.num_procs)
        # order the cells along the space filling curve
        # (Python 2 idiom: keys() returns a list, sorted in place)
        cell_ids = cell_proc.keys()
        cell_ids.sort(key=lambda x: sfc_func(x, dim=dim))
        ret_cells = [[] for i in range(num_procs)]
        proc_num_particles = [0]*num_procs
        np = 0
        proc = 0
        # walk the curve, starting a new process whenever the running
        # particle count exceeds the per-process target; the overshoot
        # is carried over to the next process
        # NOTE(review): `proc` is not clamped, so a pathological overshoot
        # pattern could index past ret_cells -- confirm inputs make this
        # impossible.
        for cell_id in cell_ids:
            np += self.proc_block_np[cell_proc[cell_id]][cell_id]
            #print proc, cell_id, np
            ret_cells[proc].append(cell_id)
            if np > np_per_proc:
                proc_num_particles[proc] = np
                np -= np_per_proc
                proc += 1
        # recompute the per-process particle counts and rewrite ownership
        self.particles_per_proc = [0]*self.num_procs
        cell_np = {}
        for cnp in self.proc_block_np:
            cell_np.update(cnp)
        for proc,cells in enumerate(ret_cells):
            for cid in cells:
                cell_proc[cid] = proc
                self.particles_per_proc[proc] += cell_np[cid]
        self.balancing_done = True
        return cell_proc, self.particles_per_proc
###############################################################################
| Python |
""" Contains class to perform load balancing.
"""
#FIXME: usage documentation
# logging imports
import logging
logger = logging.getLogger()
# standard imports
import numpy
# local imports
from pysph.base.particle_array import ParticleArray, get_particle_array
from pysph.base.cell import CellManager, py_construct_immediate_neighbor_list
# MPI message tags for the request/reply handshake used when exchanging
# particles during load balancing.
TAG_LB_PARTICLE_REQUEST = 101
TAG_LB_PARTICLE_REPLY = 102
###############################################################################
# `LoadBalancer` class.
###############################################################################
class LoadBalancer:
""" Class to perform simple load balancing. """
    def __init__(self, parallel_cell_manager=None, *args, **kwargs):
        """Constructor.

        Parameters
        ----------
        parallel_cell_manager : the parallel cell manager whose cells and
            particle arrays are to be balanced; `setup` expects it to be
            set before the first `load_balance` call.
        """
        self.setup_done = False
        self.cell_manager = parallel_cell_manager
        # iterations to skip between balancing attempts
        self.skip_iteration = 10
        self.pid = 0
        self.num_procs = 1
        # particle count of every processor, indexed by rank
        self.particles_per_proc = []
        self.ideal_load = 0.
        # NOTE(review): presumably a percentage used to derive
        # threshold_margin in calc_load_thresholds (not visible here).
        self.threshold_ratio = 25.
        self.threshold_margin = 0.
        self.lb_max_iterations = 10
        self.upper_threshold = 0.
        self.lower_threshold = 0.
        self.load_difference = []
        # counts from the previous balancing pass, used to detect stagnation
        self.prev_particle_count = []
        # suffix of the load_balance_func_* method to dispatch to
        self.method = None
        #self.adaptive = kwargs.get('adaptive', True)
    def setup(self):
        """ Sets up some internal data. """
        # idempotent: safe to call before every balancing run
        if self.setup_done == True:
            return
        # pull communication info from the cell manager's controller
        self.proc_map = self.cell_manager.proc_map
        self.parallel_controller = self.cell_manager.parallel_controller
        self.pid = self.parallel_controller.rank
        self.num_procs = self.parallel_controller.num_procs
        self.comm = self.parallel_controller.comm
        self.setup_done = True
def load_balance(self, method=None, **args):
""" Calls the load_balance_func """
self.setup()
if method is None:
method = self.method
if method is None or method == '':
self.load_balance_func(**args)
else:
func = getattr(self, 'load_balance_func_'+method)
func(**args)
def load_balance_func(self, adaptive=False):
return self.load_balance_func_normal(adaptive)
    def load_balance_func_normal(self, adaptive=False):
        """ Perform the load balancing.

        **Algorithm**

        - while load not balanced or lb iterations not exceeded.

            - Compute some statistics
            - Find the number of real particles in all processors.
            - Find the total number of particles.
            - Find the mean number of particles with each processor.
            - If number of particles with each processor is within a
              particular threshold from the mean, load is balanced, exit.
            - Sort processor ids in increasing order of number of particles
              with them. In case of multiple processors having the same
              number of particles, arrange them in ascending order of pid.
            - If there are some processors with 0 particles, communication
              among all processors.
            - If no such processors are there, each processor shall
              communicate with adjacent neighbors.

            - *********** PASS1 ************
            - mypid <- self.rank
            - num_procs <- len(procs_to_communicate)
            - i = num_procs-1
            - pid <- procs_to_communicate[i]
            - while pid != mypid:
                - send request to pid for particles.
                - recv particles of one or more blocks from pid
                - add particles to particle array.
                - i -= 1

            - *********** PASS2 ************
            - i = 0
            - pid <- procs_to_communicate[i]
            - while pid != mypid:
                - recv request from pid for particles.
                - find a suitable set of blocks to offload.
                - send particles of these blocks to pid.
                - remove sent particles from local blocks.
                - i += 1

            - BARRIER.
            - bin particles top down.
            - update processor map.
            - update neighbor information.
            - lb_iterations += 1
        """
        self.adaptive = adaptive
        balancing_done = False
        current_balance_iteration = 0
        num_procs = self.num_procs
        self.particles_per_proc = [0]*num_procs
        if len(self.prev_particle_count) == 0:
            self.prev_particle_count = [0]*num_procs
        self.ideal_load = 0.
        self.load_difference = [0]*num_procs
        while balancing_done == False:
            # particles per block: sum the counts of all cells in the block
            block_np = {}
            for bid, cells in self.cell_manager.proc_map.cell_map.iteritems():
                block_np[bid] = 0
                for cid in cells:
                    block_np[bid] += self.cell_manager.cells_dict[cid].get_number_of_particles()
            # per-process block->np mapping; only our own entry is filled
            self.proc_block_np = [{} for i in range(num_procs)]
            self.proc_block_np[self.pid].update(block_np)
            logger.info('Load Balance iteration %d -------------------'%(
                    current_balance_iteration))
            if current_balance_iteration >= self.lb_max_iterations:
                balancing_done = True
                logger.info('MAX LB ITERATIONS EXCEEDED')
                continue
            # get the number of particles with each process.
            self.particles_per_proc = self.collect_num_particles()
            # NOTE(review): calc_load_thresholds is not visible in this
            # excerpt; it presumably sets total_particles, ideal_load,
            # threshold_margin and load_difference used below -- confirm.
            self.calc_load_thresholds(self.particles_per_proc)
            min_diff = min(self.load_difference)
            max_diff = max(self.load_difference)
            # balanced when every processor's deviation is inside the margin
            if (abs(min_diff) < self.threshold_margin and max_diff <
                self.threshold_margin):
                balancing_done = True
                logger.info('BALANCE ACHIEVED')
                logger.debug('Num particles are : %s'%(self.particles_per_proc))
                continue
            logger.info('particle_counts: %r: %r'%(self.prev_particle_count,
                                                   self.particles_per_proc))
            if self.particles_per_proc == self.prev_particle_count:
                # meaning that the previous load balancing iteration did not
                # change the particle counts, we do not do anything now.
                balancing_done = True
                logger.info('Load unchanged')
                continue
            logger.debug('Total particles : %d'%(self.total_particles))
            logger.debug('Ideal load : %d'%(self.ideal_load))
            logger.debug('Load DIfference : %s'%(self.load_difference))
            logger.info('Particle counts : %s'%(self.particles_per_proc))
            logger.debug('Threshold margin: %f'%(self.threshold_margin))
            logger.debug('Upper threshold : %f'%(self.upper_threshold))
            logger.debug('Lower threshold : %f'%(self.lower_threshold))
            self.block_proc = self.cell_manager.proc_map.block_map
            # store the old particle counts in prev_particle_count
            self.prev_particle_count[:] = self.particles_per_proc
            # zero-particle processes need a global exchange; otherwise a
            # diffusion step among neighbors suffices
            if min(self.particles_per_proc) == 0:
                self.load_balance_with_zero_procs()
            else:
                self.load_balance_normal()
            # update the cell information.
            self.cell_manager.remove_remote_particles()
            self.cell_manager.delete_empty_cells()
            self.cell_manager.rebin_particles()
            self.proc_map.glb_update_proc_map(self.cell_manager.cells_dict)
            #assert len(self.proc_map.conflicts) == 0
            #recv_particles = self.proc_map.resolve_procmap_conflicts({})
            self.proc_map.find_region_neighbors()
            #self.cell_manager.add_entering_particles_from_neighbors(recv_particles)
            self.comm.Barrier()
            current_balance_iteration += 1
def collect_num_particles(self):
""" Finds the number of particles with each processor.
**Algorithm**
- gather each processors particle count at the root.
- scatter this data to all processors.
"""
arrays = self.cell_manager.arrays_to_bin
num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
particles_per_proc = self.comm.gather(num_particles, root=0)
# now num_particles has one entry for each processor, containing the
# number of particles with each processor. broadcast that data to all
# processors.
particles_per_proc = self.comm.bcast(particles_per_proc, root=0)
return particles_per_proc
def load_balance_normal(self):
""" The normal diffusion based load balance algorithm. """
self.procs_to_communicate = self._get_procs_to_communicate(
self.particles_per_proc,
self.cell_manager.proc_map.nbr_procs)
num_procs = len(self.procs_to_communicate)
# PASS 1
num_procs = len(self.procs_to_communicate)
i = num_procs - 1
pid = self.procs_to_communicate[i]
while pid != self.pid:
self.normal_lb_pass1(pid)
i -= 1
pid = self.procs_to_communicate[i]
# PASS 2
i = 0
pid = self.procs_to_communicate[i]
while pid != self.pid:
self.normal_lb_pass2(pid)
i += 1
pid = self.procs_to_communicate[i]
def load_balance_with_zero_procs(self):
""" Balances load when there are some processes with no particles.
**Idea**
If a process has zero particles, it requests the process with the
highest number of particles(at the start of the algorithm) for
particles. The process may or may not donate particles. If the zero
particle proc gets particles from this process, it will send empty
requests to the rest of the non-zero particle procs. Each zero particle
proc does this until it finds the first process ready to donate
particles.
**Algorithm**
- if process is zero particle proc, then starting with the proc
having highest number of proc start requesting all other procs,
till another zero particle proc is reached.
- if process is non-zero particle proc, then starting with the first
proc having zero particles, respond to requests from each proc.
"""
num_procs = self.num_procs
self.procs_to_communicate = self._get_procs_to_communicate(
self.particles_per_proc,
range(self.num_procs))
if self.particles_per_proc[self.pid] == 0:
self._zero_request_particles()
else:
self._zero_donate_particles()
def _get_procs_to_communicate(self, particles_per_proc, procs_to_communicate):
"""
Returns the list of procs in correct order to communicate with during
load balancing. The procs will be same as in the list
procs_to_communicate but will be ordered properly in order to avoid any
deadlocks.
The returned proc list will have process id's sorted in increasing order
of the number of particles in them. In case of ties, lower process id
will appear before a higher process id.
**Parameters**
- particles_per_proc - the number of particles with EVERY processor
in the world.
- procs_to_communicate - list of procs to communicate with while
load balancing.
"""
proc_order = list(numpy.argsort(particles_per_proc, kind='mergesort'))
for i in range(len(proc_order)-1):
if particles_per_proc[proc_order[i]] ==\
particles_per_proc[proc_order[i+1]]:
if proc_order[i] > proc_order[i+1]:
# swap the two
temp = proc_order[i]
proc_order[i] = proc_order[i+1]
proc_order[i+1] = temp
# select from this sorted order, the procs in procs_to_communicate.
output_procs = []
for proc in proc_order:
if procs_to_communicate.count(proc) == 1:
output_procs.append(proc)
return output_procs
def normal_lb_pass1(self, pid):
""" Request processors having more particles than self to donate
**Algorithm**
- send request for particles to pid.
- recv reply.
- depending on reply add new particles to self.
**Data sent/received**
- check if we need more particles.
- if yes
- send a dictionary in the format given below.
- receive a dictionary of blocks with particles for them, the
dictionary could be empty.
- add particles received in the particle arrays as real
particles.
- if no
- send a dictionary in the format given below.
- receive an empty dictionary.
"""
logger.debug('Requesting %d for particles'%(pid))
send_data = self._build_particle_request()
self.comm.send(send_data, dest=pid, tag=TAG_LB_PARTICLE_REQUEST)
data = self.comm.recv(source=pid, tag=TAG_LB_PARTICLE_REPLY)
particle_data = data['particles']
self.cell_manager.add_local_particles_to_parray(particle_data)
logger.debug('req recvd: DONE with recv: %r'%data)
def normal_lb_pass2(self, pid):
    """ Process requests from processors with lesser particles than self.

    Algorithm:
    ----------

    - recv request from pid.
    - if pid requested particles
        - check if we have particles enough to give.
        - if yes, choose an appropriate block(s), extract particles and send.

    Counterpart of normal_lb_pass1: always sends exactly one reply so the
    peer's blocking recv is matched.
    """
    logger.debug('Processing request from %d'%(pid))
    comm = self.comm
    arrays = self.cell_manager.arrays_to_bin
    # NOTE(review): num_particles computed here is unused; the count is
    # recomputed inside _build_particle_request_reply -- confirm this line
    # can be dropped.
    num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
    request = comm.recv(source=pid, tag=TAG_LB_PARTICLE_REQUEST)
    reply = self._build_particle_request_reply(request, pid)
    comm.send(reply, dest=pid, tag=TAG_LB_PARTICLE_REPLY)
    logger.debug('process request DONE with reply: %r'%reply)
def _build_particle_request(self):
    """Construct the request dictionary sent to a peer during load
    balancing.

    The request always carries ``need_particles`` (bool); when particles
    are needed it additionally carries ``num_particles`` -- the current
    local real-particle count.
    """
    arrays = self.cell_manager.arrays_to_bin
    total = 0
    for parray in arrays:
        total += parray.get_number_of_particles()
    request = {}
    if total >= self.ideal_load:
        # at or above the ideal load: nothing requested
        request['need_particles'] = False
    else:
        request['need_particles'] = True
        request['num_particles'] = total
    return request
def _build_particle_request_reply(self, request, pid):
    """ Build the reply to be sent in response to a request.

    The reply is a dict with a single key 'particles' mapping block ids
    to particle data; it is empty when nothing is donated.  Donation is
    refused when:

    - pid did not ask for particles,
    - pid already has at least as many particles as self,
    - self is within threshold_margin of the ideal load (unless the gap
      to pid itself exceeds the margin),
    - self has only a single block left.
    """
    arrays = self.cell_manager.arrays_to_bin
    # current local real-particle count
    num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
    reply = {}
    if request['need_particles'] == False:
        logger.debug('%d request for NO particles'%(pid))
        reply['particles'] = {}
        return reply
    num_particles_in_pid = request['num_particles']
    # check if pid has more particles than us.
    if num_particles_in_pid >= num_particles:
        logger.debug('%d has more particles that %d'%(pid, self.pid))
        reply['particles'] = {}
        return reply
    # if our number of particles is within the threshold, do not donate
    # particles.
    if abs(self.ideal_load-num_particles) < self.threshold_margin:
        if (not (num_particles-num_particles_in_pid) >
            self.threshold_margin):
            logger.debug('Need not donate - not overloaded')
            reply['particles'] = {}
            return reply
    # if we have only one block, do not donate.
    if len(self.cell_manager.proc_map.local_block_map) == 1:
        logger.debug('Have only one block - will not donate')
        reply['particles'] = {}
        return reply
    # get one or more blocks to send to pid
    data = self._get_particles_for_neighbor_proc(pid)
    reply['particles'] = data
    return reply
def _get_particles_for_neighbor_proc(self, pid):
    """ Returns particles (in blocks) to be moved to pid for processing

    Iteratively (10 passes when self.adaptive, else 1) picks blocks for
    pid via _get_blocks_for_neighbor_proc[2](), transfers their ownership
    in the bookkeeping, then extracts and returns copies of their
    particles keyed by block id.
    """
    # refresh the neighbor-proc info for the current block distribution
    self.block_nbr_proc = self.construct_nbr_block_info(self.block_proc)
    # get one or more blocks to send to pid
    pidr = self.pid
    if self.adaptive:
        num_iters = 10
    else:
        num_iters = 1
    blocks = []
    for i in range(num_iters):
        np = self.particles_per_proc[pidr]
        npr = self.particles_per_proc[pid]
        # stop once self is no longer overloaded relative to pid (within
        # half the threshold margin)
        if np <= npr or np < self.ideal_load-self.threshold_margin/2 or npr >= self.ideal_load+self.threshold_margin/2:
            np_reqd = 0
            break
        else:
            # aim to move enough particles to bring both procs near the
            # pair mean, clamped by the half threshold margin
            mean = (np+npr)/2
            if mean < self.ideal_load-self.threshold_margin/2:
                np_reqd = np-self.ideal_load+self.threshold_margin/2
            elif mean > self.ideal_load+self.threshold_margin/2:
                np_reqd = np-self.ideal_load-self.threshold_margin/2
            else:
                np_reqd = np - mean
        if self.adaptive:
            blk = self._get_blocks_for_neighbor_proc2(pid, pidr, self.proc_block_np[pidr], np_reqd)
        else:
            blk = self._get_blocks_for_neighbor_proc(pid, self.proc_block_np[pidr])
        for bid in blk:
            # updates block_proc, block_nbr_proc and the particle counts
            self._update_block_pid_info(bid, pidr, pid)
        blocks.extend(blk)
    #blocks_for_nbr = self._get_blocks_for_neighbor_proc(pid,
    #                            self.proc_map.local_block_map,
    #                            self.block_nbr_proc)
    blocks_for_nbr = blocks
    block_dict = {}
    for bid in blocks_for_nbr:
        block_dict[bid] = []
        for cid in self.proc_map.cell_map[bid]:
            block_dict[bid].append(self.cell_manager.cells_dict[cid])
        del self.proc_map.cell_map[bid]
    if block_dict:
        # if all blocks are being sent away, keep the last cid with self
        # NOTE(review): bid (last block of the loop above) has already had
        # its cells deleted from cell_map even though it is retained here;
        # also unlike _get_boundary_blocks_to_donate, local_block_map is
        # not updated -- confirm both are intended.
        if len(block_dict) == len(self.proc_map.local_block_map):
            del block_dict[bid]
        particles = self.cell_manager.create_new_particle_copies(block_dict)
    else:
        logger.debug('No blocks found for %d'%(pid))
        particles = {}
    return particles
def _zero_request_particles(self):
    """ Requests particles from processors with some particles.

    Executed by a processor that currently has no particles.  Walks
    procs_to_communicate from the most loaded end and performs one
    request/reply exchange with every proc that has particles, stopping
    at the first zero-particle proc.  After the first successful
    donation num_particles becomes > 0, so the remaining requests report
    need_particles = False (the peers still expect the exchange; see
    _zero_donate_particles).
    """
    arrays = self.cell_manager.arrays_to_bin
    comm = self.comm
    # iterate from the most loaded proc downwards
    i = self.num_procs - 1
    req = {}
    done = False
    while i > 0 and done == False:
        pid = self.procs_to_communicate[i]
        np = self.particles_per_proc[pid]
        if np == 0:
            # the rest of the list also has no particles; stop
            done = True
            continue
        # recomputed every pass: an earlier exchange may have added particles
        num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
        req['num_particles'] = num_particles
        if num_particles > 0:
            req['need_particles'] = False
        else:
            req['need_particles'] = True
        comm.send(req, dest=pid, tag=TAG_LB_PARTICLE_REQUEST)
        data = comm.recv(source=pid, tag=TAG_LB_PARTICLE_REPLY)
        # add the particles in the parray
        particles = data['particles']
        self.cell_manager.add_local_particles_to_parray(particles)
        i -= 1
def _zero_donate_particles(self):
    """ Respond to a request for particles from a zero particle process.

    Counterpart of _zero_request_particles: walks procs_to_communicate
    from the least loaded end, serving one request/reply exchange per
    zero-particle proc, and stops at the first proc that owns particles.
    """
    comm = self.comm
    i = 0
    reply = {}
    done = False
    while i < self.num_procs and done == False:
        pid = self.procs_to_communicate[i]
        np = self.particles_per_proc[pid]
        if np > 0:
            # remaining procs have particles; no more zero-procs to serve
            done = True
            continue
        # receive the request from pid
        req = comm.recv(source=pid, tag=TAG_LB_PARTICLE_REQUEST)
        reply = self._process_zero_proc_request(pid, req)
        comm.send(reply, dest=pid, tag=TAG_LB_PARTICLE_REPLY)
        i += 1
def _process_zero_proc_request(self, pid, request):
""" Construct reply for request from process with no particles """
if request['need_particles'] == False:
return {'particles':{}}
num_particles_with_pid = request['num_particles']
if num_particles_with_pid > 0:
logger.warn('Invalid request from %d'%(pid))
return {'particles':{}}
particles = self._get_boundary_blocks_to_donate(pid)
return {'particles':particles}
def _get_boundary_blocks_to_donate(self, pid):
    """ Get boundary blocks to be donated to proc with no particles.

    Chooses the block(s) via _get_blocks_for_zero_proc(), removes them
    from the local cell/block maps and returns copies of their particles
    keyed by block id.
    """
    # refresh the neighbor-proc info for the current block distribution
    self.block_nbr_proc = self.construct_nbr_block_info(self.block_proc)
    blocks_for_proc = self._get_blocks_for_zero_proc(pid,
                        self.proc_map.local_block_map,
                        self.block_nbr_proc)
    block_dict = {}
    for bid in blocks_for_proc:
        block_dict[bid] = []
        for cid in self.proc_map.cell_map[bid]:
            block_dict[bid].append(self.cell_manager.cells_dict[cid])
        # the block leaves this proc: drop it from both maps
        del self.proc_map.cell_map[bid]
        del self.proc_map.local_block_map[bid]
    if block_dict:
        # if all blocks are being sent away, keep the last cid with self
        # NOTE(review): bid here is the last donated block, which has
        # already been removed from cell_map/local_block_map above, and
        # local_block_map was shrunk before this length comparison --
        # confirm the condition and the retention are intended.
        if len(block_dict) == len(self.proc_map.local_block_map):
            del block_dict[bid]
        particles = self.cell_manager.create_new_particle_copies(block_dict)
    else:
        logger.debug('No blocks found for %d'%(pid))
        particles = {}
    return particles
def calc_load_thresholds(self, particles_per_proc):
    """Compute the ideal load and balance thresholds from the given
    per-proc particle counts.

    Sets ``total_particles``, ``ideal_load``, ``threshold_margin``,
    ``lower_threshold`` and ``upper_threshold`` on self, and fills
    ``self.load_difference`` with each proc's deviation from the ideal
    load.

    **Parameters**

    - particles_per_proc - the number of particles with EVERY processor.

    FIX: the previous version ignored this argument and read
    self.particles_per_proc instead; all current callers pass that very
    list, so using the parameter is behavior-preserving for them while
    making the function honest about its input.
    """
    self.total_particles = sum(particles_per_proc)
    self.ideal_load = float(self.total_particles) / self.num_procs
    # margin is threshold_ratio percent of the ideal load
    self.threshold_margin = self.ideal_load * self.threshold_ratio / 100.
    self.lower_threshold = self.ideal_load - self.threshold_margin
    self.upper_threshold = self.ideal_load + self.threshold_margin
    for i in range(self.num_procs):
        self.load_difference[i] = (particles_per_proc[i] -
                                   self.ideal_load)
def load_balance_func_serial(self, distr_func='single', **args):
    """ Perform load balancing serially by gathering all data on root proc

    **Algorithm**

    - on root proc
        - Compute some statistics
            - Find the number of real particles in all processors.
            - Find the total number of particles.
            - Find the mean number of particles with each processor.
        - If number of particles with each processor is within a
          particular threshold from the mean, load is balanced, exit.
        - Sort processor ids in increasing order of number of particles
          with them. In case of multiple processors having the same
          number of particles, arrange them in ascending order of pid.
        - If there are some processors with 0 particles, communication
          among all processors.
        - If no such processors are there, each processor shall
          communicate with adjacent neighbors.
    - collect all cells and number of particles on each proc on root
    - distribute particles on root proc using same algorithm as for
      distributed load balancing
    - send the info to send/recv cells to all procs
    - BARRIER.
    - bin particles top down.
    - update processor map.
    - update neighbor information.
    - lb_iterations += 1
    """
    # resolve e.g. 'single' -> self.load_redistr_single
    redistr_func = getattr(self, 'load_redistr_'+distr_func)
    self.balancing_done = False
    current_balance_iteration = 0
    self.load_difference = [0] * self.num_procs
    # fills proc_block_np (meaningful on root) and particles_per_proc (all)
    self._gather_block_particles_info()
    # build the bid -> pid map of the current distribution
    old_distr = {}
    for proc_no, cells in enumerate(self.proc_block_np):
        for cellid in cells:
            old_distr[cellid] = proc_no
    self.old_distr = old_distr
    self.block_proc = {}
    self.block_proc.update(old_distr)
    #print '(%d)'%self.pid, self.block_proc
    self.block_nbr_proc = self.construct_nbr_block_info(self.block_proc)
    # the balancing loop runs only on root (pid 0); the other procs fall
    # straight through to redistr_cells() and take part in the transfers
    while self.balancing_done == False and self.pid == 0:
        logger.info('Load Balance iteration %d -------------------' % (
            current_balance_iteration))
        if current_balance_iteration >= self.lb_max_iterations:
            self.balancing_done = True
            logger.info('MAX LB ITERATIONS EXCEEDED')
            continue
        self.load_balance_serial_iter(redistr_func, **args)
        current_balance_iteration += 1
    # do the actual transfer of particles now
    self.redistr_cells(self.old_distr, self.block_proc)
    logger.info('load distribution : %r : %r'%(set(self.block_proc.values()),
                                               self.particles_per_proc))
    # update the cell information.
    self.cell_manager.remove_remote_particles()
    self.cell_manager.delete_empty_cells()
    self.cell_manager.rebin_particles()
    self.proc_map.glb_update_proc_map(self.cell_manager.cells_dict)
    #assert len(self.proc_map.conflicts) == 0
    #recv_particles = self.proc_map.resolve_procmap_conflicts({})
    self.proc_map.find_region_neighbors()
    #self.cell_manager.add_entering_particles_from_neighbors(recv_particles)
    logger.info('waiting for lb to finish')
    self.comm.Barrier()
    if logger.getEffectiveLevel() <= 20: # only for level <= INFO
        # report the local cell/particle counts after balancing
        cell_np = {}
        np = 0
        for cellid, cell in self.cell_manager.cells_dict.items():
            cell_np[cellid] = cell.get_number_of_particles()
            np += cell_np[cellid]
        logger.info('(%d) %d particles in %d cells' % (self.pid, np, len(cell_np)))
def load_balance_serial_iter(self, redistr_func, **args):
    """ A single iteration of serial load balancing

    Recomputes the thresholds, declares balance achieved when every proc
    is within threshold_margin of the ideal load (or when the counts did
    not change since the previous iteration), and otherwise calls
    redistr_func to compute a new block distribution.
    """
    # get the number of particles with each process.
    #self.particles_per_proc = self.collect_num_particles()
    self.calc_load_thresholds(self.particles_per_proc)
    min_diff = min(self.load_difference)
    max_diff = max(self.load_difference)
    if (abs(min_diff) < self.threshold_margin and max_diff <
        self.threshold_margin):
        self.balancing_done = True
        logger.info('BALANCE ACHIEVED')
        logger.debug('Num particles are : %s' % (self.particles_per_proc))
        return
    if self.particles_per_proc == self.prev_particle_count and self.pid == 0:
        # meaning that the previous load balancing iteration did not
        # change the particle counts, we do not do anything now.
        self.balancing_done = True
        logger.info('Load unchanged')
        return
    if logger.getEffectiveLevel() <= 20: # only for level <= INFO
        logger.debug('Total particles : %d' % (self.total_particles))
        logger.debug('Ideal load : %d' % (self.ideal_load))
        logger.debug('Load Difference : %s' % (self.load_difference))
        logger.info('Particle counts : %s' % (self.particles_per_proc))
        logger.debug('Threshold margin: %f' % (self.threshold_margin))
        logger.debug('Upper threshold : %f' % (self.upper_threshold))
        logger.debug('Lower threshold : %f' % (self.lower_threshold))
    if not self.balancing_done:
        # store the old particle counts in prev_particle_count
        self.prev_particle_count[:] = self.particles_per_proc
        # redistr_func returns the new block->proc map and per-proc counts
        self.block_proc, self.particles_per_proc = redistr_func(
            self.block_proc, self.proc_block_np, **args)
def _gather_block_particles_info(self):
    """Gather per-block particle counts from all procs onto root.

    Sets:

    - self.block_np - local dict of bid -> num particles in that block
    - self.proc_block_np - list (indexed by pid) of such dicts; only
      meaningful on root, empty list on other procs
    - self.particles_per_proc - total particles per proc; computed on
      root and broadcast to all procs
    """
    self.particles_per_proc = [0] * self.num_procs
    block_np = {}
    # count particles per block by summing over the block's cells
    for bid, cells in self.cell_manager.proc_map.cell_map.iteritems():
        block_np[bid] = 0
        for cid in cells:
            block_np[bid] += self.cell_manager.cells_dict[cid].get_number_of_particles()
    self.block_np = block_np
    # mpi gather returns None on non-root ranks
    self.proc_block_np = self.comm.gather(block_np, root=0)
    #print '(%d)'%self.pid, self.proc_block_np
    if self.proc_block_np is None:
        self.proc_block_np = []
    for i, c in enumerate(self.proc_block_np):
        for cnp in c.values():
            self.particles_per_proc[i] += cnp
    logger.debug('(%d) %r' %(self.pid, self.particles_per_proc))
    # non-root ranks receive the totals computed on root
    self.particles_per_proc = self.comm.bcast(self.particles_per_proc, root=0)
    logger.debug('(%d) %r' % (self.pid, self.particles_per_proc))
def redistr_cells(self, old_distr, new_distr):
    """ redistribute blocks in the procs as per the new distr,
    using old_distr to determine the incremental data to be communicated

    old_distr and new_distr are used only on the root proc
    old_distr and new_distr are dict of bid:pid and need only contain
    changed blocks
    """
    logging.debug('redistributing blocks')
    r = range(self.num_procs)
    # sends[i][j] / recvs[i][j] = blocks proc i must send-to / recv-from
    # proc j.
    # FIX: the old code built the inner lists as `[] * self.num_procs`,
    # which always evaluates to just [] -- the comprehension alone
    # supplies the num_procs entries; the multiplication was a
    # misleading no-op and has been removed (behavior unchanged).
    sends = [[[] for i in r] for j in r]
    recvs = [[[] for i in r] for j in r]
    for bid, opid in old_distr.iteritems():
        npid = new_distr[bid]
        if opid != npid:
            recvs[npid][opid].append(bid)
            sends[opid][npid].append(bid)
    # scatter hands each proc its own row of the matrices
    sends = self.comm.scatter(sends, root=0)
    recvs = self.comm.scatter(recvs, root=0)
    # now each proc has all the blocks it needs to send/recv from other procs
    logging.debug('sends' + str([len(i) for i in sends]))
    logging.debug('recvs' + str([len(i) for i in recvs]))
    # deadlock-free pairing: greater pid will recv first
    for i in range(self.pid):
        self.recv_particles(recvs[i], i)
        self.send_particles(sends[i], i)
    # smaller pid will send first
    for i in range(self.pid + 1, self.num_procs):
        self.send_particles(sends[i], i)
        self.recv_particles(recvs[i], i)
    logging.debug('redistribution of blocks done')
def load_redistr_single(self, block_proc=None, proc_block_np=None,
                        adaptive=False, **args):
    """ The load balance algorithm running on root proc

    The algorithm is same as the parallel normal load balancing algorithm,
    except zero proc handling that is run completely on the root proc

    **Parameters**

    - block_proc, proc_block_np - accepted for interface compatibility;
      the method operates on self.block_proc / self.proc_block_np
    - adaptive - when True single_lb_transfer_blocks makes up to 10
      refinement passes per proc pair

    Returns (self.block_proc, self.particles_per_proc) after transfers.
    """
    self.adaptive = adaptive
    # procs sorted by increasing particle count (ties: lower pid first)
    self.procs_to_communicate = self._get_procs_to_communicate(
        self.particles_per_proc, range(self.num_procs))
    #self.procs_to_communicate = numpy.argsort(self.particles_per_proc)[::-1]
    num_procs = len(self.procs_to_communicate)
    # NOTE(review): index -1 is the *most* loaded proc in the ascending
    # order above, so this condition holds only when every proc is empty;
    # confirm the intent was not procs_to_communicate[0] (any empty proc).
    if self.particles_per_proc[self.procs_to_communicate[-1]] == 0:
        # load balancing with zero_procs
        for i in range(num_procs):
            pid = self.procs_to_communicate[i]
            for j in range(num_procs-i):
                if self.particles_per_proc[pid] != 0:
                    break
                pidr = self.procs_to_communicate[-j-1]
                self.single_lb_transfer_blocks(pid, pidr)
    else:
        # pass1 pid = pid, pass2 pid = pidr
        for i in range(num_procs):
            pid = self.procs_to_communicate[i]
            for j in range(num_procs-i):
                pidr = self.procs_to_communicate[-j-1]
                self.single_lb_transfer_blocks(pid, pidr)
    logger.debug('load_redistr_single done')
    return self.block_proc, self.particles_per_proc
def load_redistr_auto(self, block_proc=None, proc_block_np=None, **args):
    """ load redistribution by automatic selection of method

    If only one proc has all the particles, then use the
    load_redistr_geometric method, else use load_redistr_simple
    """
    non_zeros = len([1 for p in self.particles_per_proc if p > 0])
    if non_zeros == 1:
        # everything sits on a single proc: bootstrap with a geometric split
        logger.info('load_redistr_auto: geometric')
        block_proc, np_per_proc = self.load_redistr_geometric(self.block_proc,
                                                self.proc_block_np)
        # the geometric split is only a starting point; keep iterating
        self.balancing_done = False
        self.block_nbr_proc = self.construct_nbr_block_info(block_proc)
        # rebuild proc_block_np to match the new block -> proc assignment
        block_np = {}
        for proc,c_np in enumerate(self.proc_block_np):
            block_np.update(c_np)
        self.proc_block_np = [{} for i in range(self.num_procs)]
        for cid,pid in block_proc.iteritems():
            self.proc_block_np[pid][cid] = block_np[cid]
        return block_proc, np_per_proc
    else:
        logger.info('load_redistr_auto: serial')
        return self.load_redistr_single(self.block_proc, self.proc_block_np,
                                        **args)
def single_lb_transfer_blocks(self, pid, pidr):
    """ Allocate particles from proc pidr to proc pid (on root proc)

    Serial analogue of the normal_lb_pass1/normal_lb_pass2 exchange:
    decides whether pid needs particles and, if so, moves one or more
    blocks from pidr to pid, updating all bookkeeping through
    _update_block_pid_info().

    Returns the list of block ids transferred (possibly empty).
    """
    num_particles = self.particles_per_proc[pid]
    # FIX: both branches used to set need_particles = True, so the flag
    # was always True and the "NO particles" early-out below was dead
    # code; a proc at or above the ideal load must not request particles
    # (mirrors _build_particle_request()).
    if num_particles < self.ideal_load:
        need_particles = True
    else:
        need_particles = False
    num_particlesr = self.particles_per_proc[pidr]
    if num_particles == 0 and num_particlesr > 1:
        # send a block to zero proc
        blocks = self._get_blocks_for_zero_proc(pid, self.proc_block_np[pidr])
        for bid in blocks:
            self._update_block_pid_info(bid, pidr, pid)
        return blocks
    logger.debug('%d %d %d %d transfer' % (pid, num_particles, pidr, num_particlesr))
    if need_particles == False:
        logger.debug('%d request for NO particles' % (pid))
        return []
    # check if pid has more particles than pidr
    if num_particles >= num_particlesr:
        logger.debug('%d has more particles that %d' % (pid, pidr))
        return []
    # if number of particles in pidr is within the threshold, do not donate
    # particles
    if abs(self.ideal_load - num_particlesr) < self.threshold_margin:
        if (not (num_particlesr - num_particles) > self.threshold_margin):
            logger.debug('Need not donate - not overloaded')
            return []
    # if pidr has only one block, do not donate
    if len(self.proc_block_np[pidr]) == 1:
        logger.debug('Have only one block - will not donate')
        return []
    # get one or more blocks to send to pid; adaptive mode refines the
    # transfer over several passes
    if self.adaptive:
        num_iters = 10
    else:
        num_iters = 1
    blocks = []
    for i in range(num_iters):
        np = self.particles_per_proc[pidr]
        npr = self.particles_per_proc[pid]
        # stop once the donor is no longer overloaded relative to the
        # receiver (within half the threshold margin)
        if np <= npr or np < self.ideal_load-self.threshold_margin/2 or npr >= self.ideal_load+self.threshold_margin/2:
            np_reqd = 0
            break
        else:
            # move enough particles to bring the pair near their mean,
            # clamped by the half threshold margin
            mean = (np+npr)/2
            if mean < self.ideal_load-self.threshold_margin/2:
                np_reqd = np-self.ideal_load+self.threshold_margin/2
            elif mean > self.ideal_load+self.threshold_margin/2:
                np_reqd = np-self.ideal_load-self.threshold_margin/2
            else:
                np_reqd = np - mean
        if self.adaptive:
            blk = self._get_blocks_for_neighbor_proc2(pid, pidr, self.proc_block_np[pidr], np_reqd)
        else:
            blk = self._get_blocks_for_neighbor_proc(pid, self.proc_block_np[pidr])
        for bid in blk:
            self._update_block_pid_info(bid, pidr, pid)
        blocks.extend(blk)
    return blocks
def recv_particles(self, blocks, pid):
    """Receive the particles of the given blocks from processor pid and
    add them as local (real) particles.

    No-op when ``blocks`` is empty -- the peer skips the matching send,
    so posting a recv would hang.
    """
    if not blocks:
        return
    nblocks = len(blocks)
    logger.debug('Receiving particles in %d blocks from %d' % (nblocks, pid))
    incoming = self.comm.recv(source=pid, tag=TAG_LB_PARTICLE_REPLY)
    self.cell_manager.add_local_particles_to_parray(incoming)
    logger.debug('Received particles from %d' % (pid))
def send_particles(self, blocks, pid):
    """Extract the particles of the given blocks and send them to
    processor pid.

    No-op when ``blocks`` is empty -- the peer skips the matching recv,
    so posting a send would be unmatched.
    """
    if not blocks:
        return
    nblocks = len(blocks)
    logger.debug('Sending particles in %d blocks to %d' % (nblocks, pid))
    outgoing = self._build_particles_to_send_from_blocks(blocks, pid)
    self.comm.send(outgoing, dest=pid, tag=TAG_LB_PARTICLE_REPLY)
    logger.debug('Sent particles to %d' % (pid))
def _build_particles_to_send_from_blocks(self, blocks, pid):
    """
    Build the reply to be sent in response to a request.

    Collects every cell of every listed block into a cid -> [cell] map
    and delegates the particle extraction to the cell manager.
    """
    cells_to_copy = {}
    proc_map = self.cell_manager.proc_map
    cells_dict = self.cell_manager.cells_dict
    for bid in blocks:
        for cid in proc_map.cell_map[bid]:
            cells_to_copy[cid] = [cells_dict[cid]]
    return self.cell_manager.create_new_particle_copies(cells_to_copy)
def _get_blocks_for_zero_proc(self, pid, blocks, block_nbr_proc=None):
""" return a block to be sent to nbr zero proc `pid`
blocks is the sequence of blocks from which to choose the blocks to send
Algorithm:
----------
- find all boundary blocks.
- choose the one with the least number of neighbors to donate
"""
if block_nbr_proc is None:
block_nbr_proc = self.block_nbr_proc
max_empty_count = -1
blocks_for_nbr = []
for bid in blocks:
empty_count = block_nbr_proc[bid].get(-1, 0)
if empty_count > max_empty_count:
max_empty_count = empty_count
blocks_for_nbr = [bid]
return blocks_for_nbr
def _get_blocks_for_neighbor_proc(self, pid, blocks, block_nbr_proc=None):
""" return blocks to be sent to nbr proc `pid`
Parameters:
-----------
- `blocks` - sequence of blocks from which to choose the blocks to send
- `block_nbr_proc` - (self.block_nbr_proc) a dictionary mapping bid to
a dictionary of proc to num_nbr_blocks_in_proc as returned by
`construct_nbr_block_info()`
Algorithm:
----------
- Get all blocks with self that have remote neighbors.
- Of these get all particles that have 'pid' as neighbor.
- Of these choose the blocks with the msximum number of neighbor
blocks in pid.
"""
if block_nbr_proc is None:
block_nbr_proc = self.block_nbr_proc
max_neighbor_count = 1
blocks_for_nbr = []
for bid in blocks:
bpid = self.block_proc[bid]
local_nbr_count = block_nbr_proc[bid].get(bpid, 0)
remote_nbr_count = 26 - block_nbr_proc[bid].get(-1, 0) - local_nbr_count
if remote_nbr_count == 0:
#logger.debug('%s has no remote nbrs'%(cid))
continue
num_nbrs_in_pid = block_nbr_proc[bid].get(pid)
if not num_nbrs_in_pid:
continue
if num_nbrs_in_pid > max_neighbor_count:
max_neighbor_count = num_nbrs_in_pid
blocks_for_nbr = [bid]
elif num_nbrs_in_pid == max_neighbor_count:
blocks_for_nbr.append(bid)
if not blocks_for_nbr:
logger.debug('No blocks found for %d' % (pid))
return blocks_for_nbr
def _get_blocks_for_neighbor_proc2(self, pid, pidr, blocks, np_reqd, block_nbr_proc=None):
    """ return blocks to be sent to nbr proc `pid`

    Adaptive variant: scores the candidate blocks of proc pidr by their
    adjacency to pid (and their distance from the centroid of pidr's
    blocks) and keeps adding the best-scored blocks until roughly
    np_reqd particles are covered.
    """
    if block_nbr_proc is None:
        block_nbr_proc = self.block_nbr_proc
    blocks_for_nbr = []
    block_score = {}
    # get score for each block
    x = y = z = 0 # for centroid of blocks
    max_neighbor_count = 0
    for bid in blocks:
        bpid = self.block_proc[bid]
        local_nbr_count = block_nbr_proc[bid].get(bpid, 0)
        # 26 = max immediate neighbors of a block in 3D
        remote_nbr_count = 26 - block_nbr_proc[bid].get(-1, 0) - local_nbr_count
        num_nbrs_in_pid = block_nbr_proc[bid].get(pid, 0)
        # keep only blocks tying the maximal pid-adjacency; the score
        # favors pid-adjacent boundary blocks over interior ones
        if max_neighbor_count < num_nbrs_in_pid:
            max_neighbor_count = num_nbrs_in_pid
            block_score.clear()
            block_score[bid] = 2*num_nbrs_in_pid + remote_nbr_count - local_nbr_count
        elif max_neighbor_count == num_nbrs_in_pid:
            block_score[bid] = 2*num_nbrs_in_pid + remote_nbr_count - local_nbr_count
        x += bid.x
        y += bid.y
        z += bid.z
    if max_neighbor_count == 0:
        # no block of pidr touches pid at all
        return []
    num_blocks = float(len(blocks))
    x /= num_blocks
    y /= num_blocks
    z /= num_blocks
    # distance of each block from the centroid; donating outliers first
    block_dist = {}
    for bid in blocks:
        block_dist[bid] = ((bid.x-x)**2+(bid.y-y)**2+(bid.z-z)**2)**0.5
    # NOTE(review): mean_dist is 0 when all blocks coincide, which would
    # divide by zero below; .values() as a positional arg implies
    # Python 2 semantics here -- confirm.
    mean_dist = numpy.average(block_dist.values())
    for bid in block_score:
        block_score[bid] += block_dist[bid] / mean_dist
    # allocate block for neighbor: best scores first; stop once enough
    # particles are covered or the score falls more than 2 below the best
    sblocks = sorted(block_score, key=block_score.get, reverse=True)
    particles_send = 0
    block_np = self.proc_block_np[pidr]
    max_score = block_score[sblocks[0]]
    #print block_np
    for bid in sblocks:
        #if max_neighbor_count > block_nbr_proc[bid].get(pid, 0):
        #    continue
        particles_send += block_np[bid]
        if particles_send > np_reqd or block_score[bid] < max_score-2:
            particles_send -= block_np[bid]
            break
        blocks_for_nbr.append(bid)
    if not blocks_for_nbr:
        logger.debug('No blocks found for %d' % (pid))
    return blocks_for_nbr
@classmethod
def construct_nbr_block_info(self, block_proc, nbr_for_blocks=None):
    """ Construct and return the dict of bid:{pid:nnbr} having the neighbor
    pid information for each block.

    If nbr_for_blocks is specified as a sequence of blocks, only these
    blocks' nbrs will be computed """
    if nbr_for_blocks is None:
        nbr_for_blocks = block_proc
    result = {}
    for bid in nbr_for_blocks:
        immediate_nbrs = []
        py_construct_immediate_neighbor_list(bid, immediate_nbrs, False)
        counts = {}
        for nbr in immediate_nbrs:
            # neighbors with no owner are tallied under the sentinel -1
            owner = block_proc.get(nbr, -1)
            counts[owner] = counts.get(owner, 0) + 1
        result[bid] = counts
    return result
def _update_block_pid_info(self, bid, old_pid, new_pid):
    """ Update the block_nbr_proc dict to reflect a change in the pid of
    block bid to new_pid

    Also updates block_proc, and moves the block's particle count from
    old_pid to new_pid in proc_block_np and particles_per_proc.
    """
    #print bid, old_pid, new_pid
    self.block_proc[bid] = new_pid
    block_nbr_proc = self.block_nbr_proc
    nbrs = []
    py_construct_immediate_neighbor_list(bid, nbrs, False)
    # every tracked neighbor of bid now sees one less old_pid neighbor
    # and one more new_pid neighbor
    for nbr in nbrs:
        nbr_info = block_nbr_proc.get(nbr)
        if nbr_info is not None:
            nbr_info[old_pid] -= 1
            nbr_info[new_pid] = nbr_info.get(new_pid, 0) + 1
    # move the block's particle-count bookkeeping to its new owner
    self.proc_block_np[new_pid][bid] = self.proc_block_np[old_pid][bid]
    del self.proc_block_np[old_pid][bid]
    block_np = self.proc_block_np[new_pid][bid]
    self.particles_per_proc[old_pid] -= block_np
    self.particles_per_proc[new_pid] += block_np
###########################################################################
# simple method to assign some blocks to all procs based on geometry
# subdivision. The distribution is unsuitable as load balancer,
# but may provide a good method to initiate load balancing
###########################################################################
def load_redistr_geometric(self, block_proc, proc_block_np, allow_zero=False, **args):
    """ distribute block_np to processors in a simple geometric way

    **algorithm**

    # get the distribution size of each dimension using `get_distr_size()`
        based on the domain size of the block_np
    # divide the domain into rectangular grids
    # assign block_np to each processor
    # check empty processors and divide block_np in processor having
        more than average block_np to the empty processors
    """
    num_procs = len(proc_block_np)
    # merge the per-proc block counts into one global bid -> np dict
    merged_np = {}
    for per_proc in proc_block_np:
        merged_np.update(per_proc)
    proc_blocks, proc_num_particles = self.distribute_particles_geometric(
        merged_np, num_procs, allow_zero)
    self.balancing_done = True
    return self.get_block_proc(proc_blocks=proc_blocks), proc_num_particles
@staticmethod
def get_distr_sizes(l=1., b=1., h=1., num_procs=12):
    """return the number of clusters to be generated along each dimension

    l,b,h are the size of the domain
    return: s = ndarray of size 3 = number of divisions along each dimension
        s[0]*s[1]*s[2] >= num_procs"""
    dims = numpy.array([l, b, h], dtype='float')
    volume = numpy.cumprod(dims)[-1]
    # isotropic scale factor so the product of divisions ~ num_procs
    scale = (float(num_procs) / volume) ** (1.0 / 3)
    divisions = numpy.ceil(dims * scale)
    # greedily shrink dimensions (largest division count first) while the
    # product still covers num_procs
    while True:
        order = numpy.argsort(divisions)
        small, mid, big = order[0], order[1], order[2]
        if (divisions[big] - 1) * divisions[mid] * divisions[small] >= num_procs:
            divisions[big] -= 1
        elif divisions[big] * (divisions[mid] - 1) * divisions[small] >= num_procs:
            divisions[mid] -= 1
        elif divisions[big] * divisions[mid] * (divisions[small] - 1) >= num_procs:
            divisions[small] -= 1
        else:
            break
    #print 'sizes: %s'%(str(divisions))
    return divisions
@staticmethod
def distribute_particles_geometric(block_np, num_procs, allow_zero=False):
    """ distribute block_np to processors in a simple way

    **algorithm**

    # get the distribution size of each dimension using `get_distr_size()`
        based on the domain size of the block_np
    # divide the domain into rectangular grids
    # assign block_np to each processor
    # check empty processors and divide block_np in processor having
        more than average block_np to the empty processors

    **Parameters**

    - block_np - dict of block_id -> num particles; block_id is assumed
      to carry integer .x/.y/.z grid coordinates (TODO confirm)
    - num_procs - number of processors to distribute over
    - allow_zero - if True, procs may end up with no blocks

    Returns (proc_blocks, proc_num_particles): per-proc block-id lists
    and the matching particle counts.
    """
    num_blocks = len(block_np)
    # block coordinates and particle counts as flat arrays
    block_arr = numpy.empty((num_blocks, 3))
    num_particles_arr = numpy.empty((num_blocks,), dtype='int')
    for i,block_id in enumerate(block_np):
        block_arr[i,0] = block_id.x
        block_arr[i,1] = block_id.y
        block_arr[i,2] = block_id.z
        num_particles_arr[i] = block_np[block_id]
    np_per_proc = sum(num_particles_arr)/num_procs
    lmin = numpy.min(block_arr[:,0])
    bmin = numpy.min(block_arr[:,1])
    hmin = numpy.min(block_arr[:,2])
    # range of blocks in each dimension
    l = numpy.max(block_arr[:,0])+1 - lmin
    b = numpy.max(block_arr[:,1])+1 - bmin
    h = numpy.max(block_arr[:,2])+1 - hmin
    # distribution sizes in each dimension
    s = LoadBalancer.get_distr_sizes(l,b,h,num_procs)
    # extent of one proc region along each dimension
    ld = l/s[0]
    bd = b/s[1]
    hd = h/s[2]
    # allocate regions to procs
    # deficit of actual processes to allocate
    deficit = int(numpy.cumprod(s)[-1] - num_procs)
    # sorted s
    ss = numpy.argsort(s)
    # reversed dict (value to index)
    rss = numpy.empty(len(ss), dtype='int')
    for i,si in enumerate(ss):
        rss[si] = i
    proc = 0
    proc_blocks = [[] for i in range(num_procs)]
    proc_map = {}
    done = False
    # walk the region grid; whenever there are more grid regions than
    # procs (deficit > 0) merge a pair of k-adjacent regions onto one proc
    for i in range(int(s[ss[0]])):
        for j in range(int(s[ss[1]])):
            for k in range(int(s[ss[2]])):
                if done:
                    done = False
                    continue
                proc_map[tuple(numpy.array((i,j,k),dtype='int')[rss])] = proc
                proc += 1
                if deficit > 0 and k==0:
                    deficit -= 1
                    proc_map[tuple(numpy.array((i,j,k+1),dtype='int')[rss])] = proc-1
                    done = True
    # allocate block_np to procs
    proc_num_particles = [0 for i in range(num_procs)]
    for i,block_id in enumerate(block_np):
        index = (int((block_id.x-lmin)//ld), int((block_id.y-bmin)//bd),
                 int((block_id.z-hmin)//hd))
        proc_blocks[proc_map[index]].append(block_id)
        proc_num_particles[proc_map[index]] += block_np[block_id]
    # return the distribution if procs with zero blocks are permitted
    if allow_zero:
        return proc_blocks, proc_num_particles
    # add block_np to empty procs
    proc_particles_s = numpy.argsort(proc_num_particles)
    empty_procs = [proc for proc,np in enumerate(proc_num_particles) if np==0]
    i = num_procs - 1
    # hand chunks of the most loaded procs' blocks to the empty procs
    while len(empty_procs) > 0:
        nparts = int(min(numpy.ceil(
            proc_num_particles[proc_particles_s[i]]/float(np_per_proc)),
                         len(empty_procs)))
        blocks = proc_blocks[proc_particles_s[i]]
        nblocks = int((len(blocks)/float(nparts+1)))
        proc_blocks[proc_particles_s[i]] = []
        blocks_sorted = sorted(blocks, key=hash)
        for j in range(nparts):
            blocks2send = blocks_sorted[j*nblocks:(j+1)*nblocks]
            proc_blocks[empty_procs[j]][:] = blocks2send
            for bid in blocks2send:
                proc_num_particles[empty_procs[j]] += block_np[bid]
                proc_num_particles[proc_particles_s[i]] -= block_np[bid]
        # NOTE(review): j leaks out of the loop above; if nparts == 0 this
        # raises NameError -- confirm nparts >= 1 always holds here.
        proc_blocks[proc_particles_s[i]][:] = blocks_sorted[(j+1)*nblocks:]
        empty_procs[:nparts] = []
        i -= 1
    return proc_blocks, proc_num_particles
###########################################################################
@classmethod
def get_block_proc(self, proc_blocks):
    """Invert a per-proc list of block ids into a dict of bid -> proc."""
    mapping = {}
    for proc_id, block_ids in enumerate(proc_blocks):
        for bid in block_ids:
            mapping[bid] = proc_id
    return mapping
@classmethod
def get_load_imbalance(self, particles_per_proc):
    """return the imbalance in the load distribution = (max-avg)/max

    FIX: returns 0.0 when every proc has zero particles instead of
    raising ZeroDivisionError (a fully empty distribution is trivially
    balanced).
    """
    total = sum(particles_per_proc)
    avg = float(total)/len(particles_per_proc)
    mx = max(particles_per_proc)
    if mx == 0:
        # no particles anywhere -- define the imbalance as zero
        return 0.0
    return (mx-avg)/mx
@classmethod
def get_quality(self, block_nbr_proc, block_proc, num_procs, ndim):
    """Return measures of the quality of a block distribution.

    **Parameters**

    - block_nbr_proc - dict bid -> {pid: num nbr blocks} as returned by
      construct_nbr_block_info()
    - block_proc - dict bid -> owning pid
    - num_procs - number of processors
    - ndim - dimensionality of the problem

    **Returns** (blocks_nbr, blocks_nbr_proc, procs_nbr):

    - blocks_nbr - normalized count of remote neighbor blocks
    - blocks_nbr_proc - normalized count of distinct remote neighbor
      procs summed over blocks
    - procs_nbr - average number of neighbor procs per proc
    """
    num_blocks = len(block_nbr_proc)
    blocks_nbr = blocks_nbr_proc = procs_nbr = 0
    max_nbrs = (3**ndim-1)
    proc_nbrs = [set() for i in range(num_procs)]
    # .items() instead of py2-only .iteritems(): works on both 2 and 3
    for bid, proc_np in block_nbr_proc.items():
        pid = block_proc[bid]
        # NOTE(review): 26 is the 3D immediate-neighbor count; for
        # ndim != 3 this is inconsistent with max_nbrs above -- confirm.
        blocks_nbr += 26 - proc_np.get(-1, 0) - proc_np.get(pid, 0)
        blocks_nbr_proc += len(proc_np) - 1 - (-1 in proc_np)
        proc_nbrs[pid].update(proc_np)
    for pid, proc_nbrs_data in enumerate(proc_nbrs):
        # FIX: use discard() -- remove() raised KeyError for any proc
        # that owns no blocks (empty set) or has no empty/self neighbors.
        proc_nbrs_data.discard(-1)
        proc_nbrs_data.discard(pid)
    #print proc_nbrs
    # normalize by the expected boundary-block count of the partition
    fac = num_blocks**((ndim-1.0)/ndim) * max_nbrs
    blocks_nbr = blocks_nbr / fac
    blocks_nbr_proc = blocks_nbr_proc / fac
    procs_nbr = sum([len(i) for i in proc_nbrs])/float(num_procs)
    return blocks_nbr, blocks_nbr_proc, procs_nbr
@classmethod
def get_metric(self, block_proc, particles_per_proc, ndim=None):
    """ return a performance metric for the current load distribution

    Combines the load imbalance with the distribution-quality triple
    from get_quality() into a single tuple.
    """
    if ndim is None:
        # FIXME: detect the dimension of the problem
        ndim = 2
    num_procs = len(particles_per_proc)
    imbalance = self.get_load_imbalance(particles_per_proc)
    nbr_info = self.construct_nbr_block_info(block_proc)
    quality = self.get_quality(nbr_info, block_proc, num_procs, ndim)
    return (imbalance,) + quality
@classmethod
def plot(self, proc_blocks, show=True, save_filename=None):
    """Visualize a block distribution with Mayavi.

    Each block is drawn as a cube at its (x, y, z) grid coordinate,
    colored by the owning proc index.  Requires enthought.mayavi; logs
    a critical message and returns when it is unavailable.

    **Parameters**

    - proc_blocks - list (indexed by proc) of block-id lists
    - show - if True, open an interactive window
    - save_filename - if given, save the figure to this path
    """
    try:
        from enthought.mayavi import mlab
    except:
        logger.critical('LoadBalancer.plot(): need mayavi to plot')
        return
    # assign a flat index to every block across all procs
    block_idx = {}
    #print [len(i) for i in proc_blocks]
    i = 0
    for procno, procblocks in enumerate(proc_blocks):
        for block_id in procblocks:
            block_idx[block_id] = i
            i += 1
    num_blocks = i
    # per-block coordinates and owning proc number
    x = [0] * num_blocks
    y = [0] * num_blocks
    z = [0] * num_blocks
    p = [0] * num_blocks
    i = 0
    for procno, procblocks in enumerate(proc_blocks):
        for block_id in procblocks:
            x[block_idx[block_id]] = block_id.x
            y[block_idx[block_id]] = block_id.y
            z[block_idx[block_id]] = block_id.z
            p[block_idx[block_id]] = procno
            i += 1
    figure = mlab.figure(0, size=(1200,900))
    plot = mlab.points3d(x, y, z, p, mode='cube', colormap='jet',
                         scale_mode='none', scale_factor=0.8, figure=figure)
    engine = mlab.get_engine()
    scene = engine.scenes[0]
    scene.scene.parallel_projection = True
    #scene.scene.camera.view_up = [0.0, 1.0, 0.0]
    mlab.view(0,0)
    if save_filename:
        mlab.savefig(save_filename, figure=figure)
    if show:
        mlab.show()
    @classmethod
    def distribute_particle_arrays(cls, particle_arrays, num_procs, block_size,
                                   max_iter=100, distr_func='single', **args):
        """Convenience function to distribute given particles into procs
        Uses the load_balance_func_serial() function of LoadBalancer class to
        distribute the particles. Balancing methods can be changed by passing
        the same `args` as to the load_balance_func_serial method
        """
        # create a single (serial) balancer instance acting as processor 0
        lb = get_load_balancer_class()()
        lb.pid = 0
        lb.num_procs = num_procs
        lb.lb_max_iteration = max_iter
        # record each array's property names (as empty arrays) and constants;
        # these templates are used below to build the per-processor arrays
        z = numpy.empty(0)
        empty_props = []
        constants = []
        for pa in particle_arrays:
            d = {}
            for prop in pa.properties:
                d[prop] = z
            empty_props.append(d)
            constants.append(pa.constants)
        # pick the redistribution method by name, e.g. 'load_redistr_single'
        redistr_func = getattr(lb, 'load_redistr_'+distr_func)
        #print redistr_func
        lb.load_difference = [0] * lb.num_procs
        # set cell size same as block size and operate on cells
        cm = CellManager(particle_arrays, block_size, block_size)
        #print 'num_cells=', len(cm.cells_dict), cm.block_size
        lb.particles_per_proc = [0] * lb.num_procs
        # particle count per block; initially every block lives on proc 0
        block_np = {}
        for bid, cell in cm.cells_dict.iteritems():
            block_np[bid] = cell.get_number_of_particles()
        lb.proc_block_np = [{} for i in range(num_procs)]
        lb.proc_block_np[0] = block_np
        #print '(%d)'%self.pid, self.proc_block_np
        for i, c in enumerate(lb.proc_block_np):
            for cnp in c.values():
                lb.particles_per_proc[i] += cnp
        # block id -> owning processor, before balancing
        old_distr = {}
        for proc_no, blocks in enumerate(lb.proc_block_np):
            for bid in blocks:
                old_distr[bid] = proc_no
        lb.old_distr = old_distr
        lb.block_proc = {}
        lb.block_proc.update(old_distr)
        #print '(%d)'%self.pid, self.block_proc
        lb.block_nbr_proc = lb.construct_nbr_block_info(lb.block_proc)
        lb.balancing_done = False
        current_balance_iteration = 0
        # iterate the serial balancer until it converges or max_iter is hit
        while lb.balancing_done == False and current_balance_iteration < max_iter:
            #print '\riteration', current_balance_iteration,
            lb.load_balance_serial_iter(redistr_func, **args)
            current_balance_iteration += 1
        # extract each block's particles into its assigned processor's arrays
        na = len(cm.arrays_to_bin)
        particle_arrays_per_proc = [[get_particle_array(**empty_props[j]) for j in range(na)] for
                                    i in range(num_procs)]
        cells_dict = cm.cells_dict
        a2b = cm.arrays_to_bin
        for bid, proc in lb.block_proc.iteritems():
            cell = cells_dict[bid]
            pid_list = []
            cell.get_particle_ids(pid_list)
            for i in range(na):
                arr = particle_arrays_per_proc[proc][i]
                arr.constants.update(constants[i])
                arr.append_parray(a2b[i].extract_particles(pid_list[i]))
                arr.set_name(a2b[i].name)
                arr.set_particle_type(a2b[i].particle_type)
        return particle_arrays_per_proc
@classmethod
def distribute_particles(cls, particle_array, num_procs, block_size,
max_iter=100, distr_func='auto', **args):
"""Same as distribute_particle_arrays but for a single particle array
"""
if isinstance(particle_array, (ParticleArray,)):
is_particle_array = True
pas = [particle_array]
else:
# assume particle_array is list of particle_arrays
is_particle_array = False
pas = particle_array
ret = cls.distribute_particle_arrays(pas, num_procs, block_size,
max_iter, distr_func, **args)
if is_particle_array:
ret = [i[0] for i in ret]
return ret
def get_load_balancer_class():
    """Return the most capable available LoadBalancer implementation.

    Tries, in order of preference, the METIS-based, modified-k-means-based
    and space-filling-curve-based balancers; the first module that imports
    successfully wins, so various load balancing methods can be used.

    Raises ImportError when none of the implementations can be imported.
    (The original silently `pass`ed on the last failure and then hit an
    UnboundLocalError at the return, because the ``as LoadBalancer``
    bindings are function-local and shadow any module-level name.)
    """
    try:
        from load_balancer_metis import LoadBalancerMetis as LoadBalancer
    except ImportError:
        try:
            from load_balancer_mkmeans import LoadBalancerMKMeans as LoadBalancer
        except ImportError:
            try:
                from load_balancer_sfc import LoadBalancerSFC as LoadBalancer
            except ImportError:
                raise ImportError('no load balancer implementation could be '
                                  'imported (tried metis, mkmeans, sfc)')
    return LoadBalancer
| Python |
""" Tests for the parallel cell manager """
import nose.plugins.skip as skip
raise skip.SkipTest("Dont run this test via nose")
from pysph.parallel.simple_block_manager import SimpleBlockManager
from pysph.base.particles import Particles
from pysph.base.particle_array import get_particle_array
from pysph.base.point import IntPoint
import numpy
import pylab
import time
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = pid = comm.Get_rank()
def draw_cell(cell, color="b"):
centroid = base.Point()
cell.get_centroid(centroid)
half_size = 0.5 * cell.cell_size
x1, y1 = centroid.x - half_size, centroid.y - half_size
x2, y2 = x1 + cell.cell_size, y1
x3, y3 = x2, y1 + cell.cell_size
x4, y4 = x1, y3
pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_block(origin, block_size, block_id, color="r"):
half_size = 0.5 * block_size
x,y = [], []
xc = origin.x + ((block_id.x + 0.5) * proc_map.block_size)
yc = origin.y + ((block_id.y + 0.5) * proc_map.block_size)
x1, y1 = xc - half_size, yc - half_size
x2, y2 = x1 + block_size, y1
x3, y3 = x2, y2 + block_size
x4, y4 = x1, y3
pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_particles(cell, color="y"):
arrays = cell.arrays_to_bin
num_arrays = len(arrays)
index_lists = []
cell.get_particle_ids(index_lists)
x, y = [], []
for i in range(num_arrays):
array = arrays[i]
index_array = index_lists[i]
indices = index_lists[i].get_npy_array()
xarray, yarray = array.get('x','y')
for j in indices:
x.append(xarray[j])
y.append(yarray[j])
pylab.plot(x,y,color+"o")
def get_sorted_indices(cell):
    # Return the sorted particle indices of the first array binned in *cell*.
    index_lists = []
    cell.get_particle_ids(index_lists)
    index_array = index_lists[0].get_npy_array()
    # sorts the cell's own index data in place
    index_array.sort()
    print type(index_array)
    return index_array
# Two-rank test: each rank builds a 5x5 grid of particles (rank 1 shifted
# +0.8 in x so its leftmost column overlaps rank 0's rightmost block column)
# and records the expected particle-index grouping per block in `cids`.
if pid == 0:
    x = numpy.array( [0, 0.2, 0.4, 0.6, 0.8] * 5 )
    y = numpy.array( [0.0, 0.0, 0.0, 0.0, 0.0,
                      0.2 ,0.2, 0.2, 0.2, 0.2,
                      0.4, 0.4, 0.4, 0.4, 0.4,
                      0.6, 0.6, 0.6, 0.6, 0.6,
                      0.8, 0.8, 0.8, 0.8, 0.8] )
    # nudge off exact block boundaries
    x += 1e-10
    y += 1e-10
    h = numpy.ones_like(x) * 0.3/2.0
    block_00 = 0, 1, 5, 6
    block_10 = 2, 7
    block_20 = 3, 4, 8, 9
    block_01 = 10, 11
    block_11 = 12
    block_21 = 13, 14
    block_02 = 15, 16, 20, 21
    block_12 = 17, 22
    block_22 = 18, 19, 23, 24
    cids = [block_00, block_10, block_20,
            block_01, block_11, block_21,
            block_02, block_12, block_22]
if pid == 1:
    x = numpy.array( [0.8, 1.0, 1.2, 1.4, 1.6] * 5 )
    y = numpy.array( [0.0, 0.0, 0.0, 0.0, 0.0,
                      0.2, 0.2, 0.2, 0.2, 0.2,
                      0.4, 0.4, 0.4, 0.4, 0.4,
                      0.6, 0.6, 0.6, 0.6, 0.6,
                      0.8, 0.8, 0.8, 0.8, 0.8] )
    x += 1e-10
    y += 1e-10
    h = numpy.ones_like(x) * 0.3/2.0
    block_20 = 4, 9
    block_30 = 1, 6
    block_40 = 2, 3, 7, 8
    block_50 = 4, 9
    block_21 = 14
    block_31 = 11
    block_41 = 12, 13
    block_51 = 14
    block_22 = 15, 20
    block_32 = 16, 21
    block_42 = 17, 18, 22, 23
    block_52 = 19, 24
    cids = [block_20, block_30, block_40, block_50,
            block_21, block_31, block_41, block_51,
            block_22, block_32, block_42, block_52]
pa = get_particle_array(name="test"+str(rank), x=x, y=y, h=h)
particles = Particles(arrays=[pa,])
# create the block manager
pm = pm = SimpleBlockManager(block_scale_factor=2.0)
pm.initialize(particles)
cm = pm.cm
# block size = block_scale_factor * h = 2.0 * 0.15 = 0.3
assert ( abs(pm.block_size - 0.3) < 1e-15 )
assert (pm.block_size == cm.cell_size)
cells_dict = cm.cells_dict
pmap = pm.processor_map
assert (len(cells_dict) == len(cids))
# call an update
pm.update()
# test the processor map's local and global cell map
local_cell_map = pmap.local_cell_map
global_cell_map = pmap.global_cell_map
assert (len(local_cell_map) == len(cells_dict))
for cid in local_cell_map:
    assert( cid in cells_dict )
    assert( list(local_cell_map[cid])[0] == rank )
# swap the expected block groupings between the two ranks
if rank == 0:
    other_cids = comm.recv(source=1)
    comm.send(cids, dest=1)
if rank == 1:
    comm.send(cids, dest=0)
    other_cids = comm.recv(source=0)
# the x=2 block column is populated by particles from both ranks
conflicting_cells = IntPoint(2,0,0), IntPoint(2,1,0), IntPoint(2,2,0)
# check the conflicting cells
for cid in conflicting_cells:
    assert ( cid in global_cell_map )
    pids = list(global_cell_map[cid])
    pids.sort()
    assert ( pids == [0,1] )
# check the cells_to_send_list
cells_to_send = pmap.get_cell_list_to_send()
if rank == 0:
    expected_list = [IntPoint(1,0), IntPoint(1,1), IntPoint(1,2),
                     IntPoint(2,0), IntPoint(2,1), IntPoint(2,2)]
    cell_list = cells_to_send[1]
if rank == 1:
    expected_list = [IntPoint(2,0), IntPoint(2,1), IntPoint(2,2),
                     IntPoint(3,0), IntPoint(3,1), IntPoint(3,2)]
    cell_list = cells_to_send[0]
for cid in expected_list:
    assert (cid in cell_list)
pa = pm.arrays[0]
print rank, pa.num_real_particles, pa.get_number_of_particles()
| Python |
""" Tests for the parallel cell manager """
import nose.plugins.skip as skip
raise skip.SkipTest("Dont run this test via nose")
import pysph.base.api as base
import pysph.parallel.api as parallel
import numpy
import pylab
import time
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
pid = comm.Get_rank()
def draw_cell(cell, color="b"):
centroid = base.Point()
cell.get_centroid(centroid)
half_size = 0.5 * cell.cell_size
x1, y1 = centroid.x - half_size, centroid.y - half_size
x2, y2 = x1 + cell.cell_size, y1
x3, y3 = x2, y1 + cell.cell_size
x4, y4 = x1, y3
pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_block(origin, block_size, block_id, color="r"):
half_size = 0.5 * block_size
x,y = [], []
xc = origin.x + ((block_id.x + 0.5) * proc_map.block_size)
yc = origin.y + ((block_id.y + 0.5) * proc_map.block_size)
x1, y1 = xc - half_size, yc - half_size
x2, y2 = x1 + block_size, y1
x3, y3 = x2, y2 + block_size
x4, y4 = x1, y3
pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_particles(cell, color="y"):
arrays = cell.arrays_to_bin
num_arrays = len(arrays)
index_lists = []
cell.get_particle_ids(index_lists)
x, y = [], []
for i in range(num_arrays):
array = arrays[i]
index_array = index_lists[i]
indices = index_lists[i].get_npy_array()
xarray, yarray = array.get('x','y')
for j in indices:
x.append(xarray[j])
y.append(yarray[j])
pylab.plot(x,y,color+"o")
def get_sorted_indices(cell):
    # Return the sorted particle indices of the first array binned in *cell*.
    index_lists = []
    cell.get_particle_ids(index_lists)
    index_array = index_lists[0].get_npy_array()
    # sorts the cell's own index data in place
    index_array.sort()
    print type(index_array)
    return index_array
# Build a 5x5 grid of particles per rank on [0, 0.8]^2; ranks other than 0
# shift their copy so each rank owns a distinct region of the domain.
# (Redundant duplicated assignments of the form ``x = x = ...``, ``dx = dx``
# and ``pa = pa = ...`` in the original were collapsed -- no behavior change.)
xc = numpy.arange(0,1.0, 0.2)
x, y = numpy.meshgrid(xc,xc)
x = x.ravel()
y = y.ravel()
h = numpy.ones_like(x) * 0.25
dx = dy = 0.2
block_size = 0.5
cell_size = 0.5
# expected particle indices per 0.5-sized block for the unshifted grid
block_000_indices = 0,1,2,5,6,7,10,11,12
block_100_indices = 3,4,8,9,13,14
block_010_indices = 15,16,17,20,21,22
block_110_indices = 18,19,23,24
name = "rank" + str(pid)
pa = base.get_particle_array(name="test", x=x, y=y, h=h)
if pid == 1:
    pa.x += 1.0
    pa.x += 1e-10
if pid == 2:
    pa.y += 9
if pid == 3:
    pa.x += 9; pa.y += 9
# create the cell manager
cm = parallel.ParallelCellManager(arrays_to_bin=[pa,],
                                  max_radius_scale=2.0,
                                  dimension=2.0,
                                  load_balancing=False,
                                  initialize=False,
                                  min_cell_size=0.5)
# find global min and max
cm.update_global_properties()
# compute block size
cm.compute_block_size(0.5)
# compute cell size
cm.compute_cell_size(0,0)
# setup array indices.
cm.py_rebuild_array_indices()
# setup the cells_dict
cm.py_setup_cells_dict()
# setup information for the processor map.
cm.setup_processor_map()
# build a single cell with all the particles
cm._build_cell()
cells_dict = cm.cells_dict
proc_map = cm.proc_map
# Rank 0's half of the two-rank lock-step test.  It exercises each stage of
# the parallel cell manager explicitly, then handshakes with rank 1 via
# blocking comm.send/comm.recv so that the two ranks stay synchronized.
if pid == 0:
    assert len(cells_dict) == 1, "At this stage only the base cell should exist"
    cell = cells_dict.values()[0]
    index_lists = []
    cell.get_particle_ids(index_lists)
    index_array = index_lists[0].get_npy_array()
    index_array.sort()
    # check the indices
    cid = cells_dict.keys()[0]
    assert (cid.x, cid.y, cid.z) == (0,0,0)
    for i in range(25):
        assert index_array[i] == i
    # test the block size for the processor map
    assert proc_map.block_size == 0.5
    print "Checking cells_update"
    # check bin_particles
    print "Testing bin particles: new_block_cells, remote_block_cells"
    new_block_cells, remote_block_cells = cm.bin_particles()
    # the local and global proc_map should be empty
    assert len(proc_map.local_block_map) == 0
    assert len(proc_map.block_map) == 0
    # the remote block cells should be empty
    assert len(remote_block_cells) == 0
    # there should be four new block cells
    bid1 = base.IntPoint(0,0,0)
    bid2 = base.IntPoint(1,0,0)
    bid3 = base.IntPoint(1,1,0)
    bid4 = base.IntPoint(0,1,0)
    assert new_block_cells.has_key(bid1)
    assert new_block_cells.has_key(bid2)
    assert new_block_cells.has_key(bid3)
    assert new_block_cells.has_key(bid4)
    # the cells dict should be empty as well at this point
    assert len(cells_dict) == 0
    # test the particle copies for the new blocks
    print "Testing create_new_particle_copies"
    new_block_particles = cm.create_new_particle_copies(new_block_cells,
                                                        False)
    assert len(new_block_particles) == 4
    # check particles in bid 0,0,0
    parray_list = new_block_particles.get(bid1)
    assert len(parray_list) == 1
    parray = parray_list[0]
    indices = parray.get("idx")
    indices.sort()
    assert list(indices) == list(block_000_indices)
    # check particles in bid 1,0,0
    parray_list = new_block_particles.get(bid2)
    assert len(parray_list) == 1
    parray = parray_list[0]
    indices = parray.get("idx")
    indices.sort()
    assert list(indices) == list(block_100_indices)
    # check particles in bid 1,1,0
    parray_list = new_block_particles.get(bid3)
    assert len(parray_list) == 1
    parray = parray_list[0]
    indices = parray.get("idx")
    indices.sort()
    assert list(indices) == list(block_110_indices)
    # check particles in bid 0,1,0
    parray_list = new_block_particles.get(bid4)
    assert len(parray_list) == 1
    parray = parray_list[0]
    indices = parray.get("idx")
    indices.sort()
    assert list(indices) == list(block_010_indices)
    print "Testing assign_new_blocks: proc_map"
    # assign the new blocks to the processor map
    cm.assign_new_blocks(new_block_cells)
    # check the processor map
    assert len(cm.proc_map.local_block_map) == 4
    assert len(cm.proc_map.block_map) == 4
    assert cm.proc_map.nbr_procs == [pid]
    # compute cell size
    cm.compute_cell_size()
    assert cm.cell_size == 0.5
    # ensure all particles are local (!=0)
    pa = cm.arrays_to_bin[0]
    local = pa.get("local", only_real_particles=False)
    for i in range(pa.get_number_of_particles()):
        assert local[i] != 0
    print "Testing rebin particles"
    # rebin particles
    cm.rebin_particles()
    # now check the cells_dict
    cells_dict = cm.cells_dict
    assert len(cells_dict) == 4
    # check the particles in the cells
    cids = [base.IntPoint(0,0,0), base.IntPoint(1,0,0),
            base.IntPoint(1,1,0), base.IntPoint(0,1,0)]
    index_map = [block_000_indices, block_100_indices,
                 block_110_indices, block_010_indices]
    for i in range(4):
        cid = cids[i]
        cell = cells_dict.get(cid)
        index_lists = []
        cell.get_particle_ids(index_lists)
        cell_indices = index_lists[0].get_npy_array()
        cell_indices.sort()
        assert list(cell_indices) == list(index_map[i])
    # handshake: tell rank 1 to run its local stages, then wait for it
    request_to_start = True
    go_on = False
    comm.send(obj=request_to_start, dest=1)
    print "Requested process 1 to catch up "
    go_on = comm.recv(source=1)
    if go_on:
        print "Picking up from where we left... "
        print "Testing glb_update_proc_map"
        # update the global processor map
        cm.remove_remote_particles()
        cm.delete_empty_cells()
        cm.proc_map.glb_update_proc_map(cm.cells_dict)
        recv_particles = cm.proc_map.resolve_procmap_conflicts({})
        cm.add_entering_particles_from_neighbors(recv_particles)
        cm.remove_remote_particles()
        # check the processor maps
        print "Processor 0 Block Maps"
        print "Local\n"
        for blockid in cm.proc_map.local_block_map:
            print blockid
        print
        print "Global\n"
        for blockid in cm.proc_map.block_map:
            print blockid
        print
        print_yours=True
        comm.send(obj=print_yours, dest=1)
        print "Testing Neighbors 0"
        assert cm.proc_map.nbr_procs == [0,1]
        # exchange neighbor particles
        cm.exchange_neighbor_particles()
        print "Testing Exchange Neighbor Particles"
        print "Cells Dict For Processor 0 After Exchange\n"
        for cid, cell in cells_dict.iteritems():
            print cid, "np = ", cell.get_number_of_particles()
        print_yours=True
        comm.send(obj=print_yours, dest=1)
        print "Testing remote particle indices on Processor 0"
        parray = cm.arrays_to_bin[0]
        np = parray.get_number_of_particles()
        nrp = parray.num_real_particles
        # 25 real particles plus 15 remote copies received from rank 1
        assert nrp == 25
        assert np == 40
        local = parray.get("local", only_real_particles=False)
        rpi = cm.remote_particle_indices[1][0]
        assert rpi[0] == nrp
        assert rpi[1] == np
        for i in range(np):
            if i >= nrp:
                assert local[i] == 0
            else:
                assert local[i] == 1
        # test the update of remote particle indices
        print "Testing Update Remote Particle Properties on processor 0"
        # change the local property say 'p' and 'rho' to -1
        pa = cm.arrays_to_bin[0]
        p = pa.get('p', only_real_particles=False)
        rho = pa.get('rho', only_real_particles=False)
        p[:nrp] = -1
        rho[:nrp] = -1
        for i in range(np):
            if i >= nrp:
                assert p[i] != -1
                assert rho[i] != -1
        # wait until rank 1 has set its local values before pulling them
        yours_is_set = comm.recv(source=1)
        if yours_is_set:
            cm.update_remote_particle_properties([['p','rho']])
            p = pa.get('p', only_real_particles=False)
            rho = pa.get('rho', only_real_particles=False)
            for i in range(np):
                if i >= nrp:
                    assert p[i] == -1
                    assert rho[i] == -1
        #####################################################################
        # SECOND ITERATION
        #####################################################################
        # test the configuration
        cids = [base.IntPoint(0,0,0), base.IntPoint(1,0,0), base.IntPoint(1,1,0),
                base.IntPoint(0,1,0), base.IntPoint(2,0,0), base.IntPoint(2,1,0)]
        pa = cm.arrays_to_bin[0]
        for cid in cids:
            assert cm.cells_dict.has_key(cid)
            # the x=2 column cells hold only remote (local == 0) particles
            if cid in [base.IntPoint(2,0,0), base.IntPoint(2,1,0)]:
                cell = cells_dict.get(cid)
                index_lists = []
                cell.get_particle_ids(index_lists)
                parray = pa.extract_particles(index_lists[0])
                local = parray.get('local', only_real_particles=False)
                for val in local:
                    assert val == 0
        # remove non local particles
        cm.remove_remote_particles()
        np = pa.get_number_of_particles()
        assert np == 25
        # move 6 particles in cell/block (1,0,0) to (2,0,0)
        x = pa.get('x')
        for i in block_100_indices:
            x[i] += 0.5
        cm.cells_update()
        np = pa.get_number_of_particles()
        nrp = pa.num_real_particles
        # the 6 moved particles now belong to rank 1: 25 - 6 = 19 real
        assert np == 40
        assert nrp == 19
        # now move the 4 particles in cell/block (1,1,0) to block/cell (1,2,0)
        y = pa.get('y')
        cell_110 = cm.cells_dict.get(base.IntPoint(1,1,0))
        index_lists = []
        cell_110.get_particle_ids(index_lists)
        index_array = index_lists[0].get_npy_array()
        for i in index_array:
            y[i] += 0.5
        # now call a cells update
        cm.cells_update()
        np = pa.get_number_of_particles()
        nrp = pa.num_real_particles
        assert nrp == 19 + 6
        assert np == 19 + 6
# Rank 1's half of the lock-step test.  It blocks until rank 0 requests it
# to start, mirrors rank 0's stages (with most index assertions commented
# out -- rank 1's block ids differ), and answers rank 0's handshakes.
if pid == 1:
    start = False
    start = comm.recv(source=0)
    if start:
        print "Process 1 starting after request "
        assert len(cells_dict) == 1, "only the base cell should exist"
        cell = cells_dict.values()[0]
        index_lists = []
        cell.get_particle_ids(index_lists)
        index_array = index_lists[0].get_npy_array()
        index_array.sort()
        # check the indices
        cid = cells_dict.keys()[0]
        #assert (cid.x, cid.y, cid.z) == (0,0,0)
        for i in range(25):
            #assert index_array[i] == i
            pass
        # test the block size for the processor map
        assert proc_map.block_size == 0.5
        print "Checking cells_update"
        # check bin_particles
        print "Testing bin particles: new_block_cells, remote_block_cells"
        new_block_cells, remote_block_cells = cm.bin_particles()
        # the local and global proc_map should be empty
        assert len(proc_map.local_block_map) == 0
        assert len(proc_map.block_map) == 0
        # the remote block cells should be empty
        assert len(remote_block_cells) == 0
        # there should be four new block cells
        bid1 = base.IntPoint(0,0,0)
        bid2 = base.IntPoint(1,0,0)
        bid3 = base.IntPoint(1,1,0)
        bid4 = base.IntPoint(0,1,0)
        #assert new_block_cells.has_key(bid1)
        #assert new_block_cells.has_key(bid2)
        #assert new_block_cells.has_key(bid3)
        #assert new_block_cells.has_key(bid4)
        # the cells dict should be empty as well at this point
        assert len(cells_dict) == 0
        # test the particle copies for the new blocks
        print "Testing create_new_particle_copies"
        new_block_particles = cm.create_new_particle_copies(new_block_cells,
                                                            False)
        #assert len(new_block_particles) == 4
        # check particles in bid 0,0,0
        #parray_list = new_block_particles.get(bid1)
        #assert len(parray_list) == 1
        #parray = parray_list[0]
        #indices = parray.get("idx")
        #indices.sort()
        #assert list(indices) == list(block_000_indices)
        # check particles in bid 1,0,0
        #parray_list = new_block_particles.get(bid2)
        #assert len(parray_list) == 1
        #parray = parray_list[0]
        #indices = parray.get("idx")
        #indices.sort()
        #assert list(indices) == list(block_100_indices)
        # check particles in bid 1,1,0
        #parray_list = new_block_particles.get(bid3)
        #assert len(parray_list) == 1
        #parray = parray_list[0]
        #indices = parray.get("idx")
        #indices.sort()
        #assert list(indices) == list(block_110_indices)
        # check particles in bid 0,1,0
        #parray_list = new_block_particles.get(bid4)
        #assert len(parray_list) == 1
        #parray = parray_list[0]
        #indices = parray.get("idx")
        #indices.sort()
        #assert list(indices) == list(block_010_indices)
        print "Testing assign_new_blocks: proc_map"
        # assign the new blocks to the processor map
        cm.assign_new_blocks(new_block_cells)
        # check the processor map
        #assert len(cm.proc_map.local_block_map) == 4
        #assert len(cm.proc_map.block_map) == 4
        #assert cm.proc_map.nbr_procs == [pid]
        # compute cell size
        cm.compute_cell_size()
        assert cm.cell_size == 0.5
        # ensure all particles are local (!=0)
        pa = cm.arrays_to_bin[0]
        local = pa.get("local")
        for i in range(pa.get_number_of_particles()):
            assert local[i] != 0
        print "Testing rebin particles"
        # rebin particles
        cm.rebin_particles()
        # now check the cells_dict
        cells_dict = cm.cells_dict
        #assert len(cells_dict) == 4
        # check the particles in the cells
        cids = [base.IntPoint(0,0,0), base.IntPoint(1,0,0),
                base.IntPoint(1,1,0), base.IntPoint(0,1,0)]
        index_map = [block_000_indices, block_100_indices,
                     block_110_indices, block_010_indices]
        #for i in range(4):
        #    cid = cids[i]
        #    cell = cells_dict.get(cid)
        #    index_lists = []
        #    cell.get_particle_ids(index_lists)
        #    cell_indices = index_lists[0].get_npy_array()
        #    cell_indices.sort()
        #assert list(cell_indices) == list(index_map[i])
        # handshake: release rank 0, which is blocked waiting for us
        print "Requesting process 0 to continue"
        comm.send(obj=True, dest=0)
        print "Testing glb_update_proc_map"
        # update the global processor map
        cm.remove_remote_particles()
        cm.delete_empty_cells()
        cm.proc_map.glb_update_proc_map(cm.cells_dict)
        recv_particles = cm.proc_map.resolve_procmap_conflicts({})
        cm.add_entering_particles_from_neighbors(recv_particles)
        cm.remove_remote_particles()
        # check the processor maps
        time.sleep(.5)
        should_i_print = comm.recv(source=0)
        if should_i_print:
            print "Processor 1 Block Maps"
            print "Local\n"
            for blockid in cm.proc_map.local_block_map:
                print blockid
            print
            print "Global\n"
            for blockid in cm.proc_map.block_map:
                print blockid
            print
        print "Testing Neighbors 1"
        assert cm.proc_map.nbr_procs == [0,1]
        # exchange neighbor particles
        cm.exchange_neighbor_particles()
        print "Testing Exchange Neighbor Particles"
        should_i_print_cells_dict = comm.recv(source=0)
        if should_i_print_cells_dict:
            print "Cells Dict For Processor 1 After Exchange\n"
            for cid, cell in cells_dict.iteritems():
                print cid, "np = ", cell.get_number_of_particles()
        print "Testing remote particle indices on Processor 1"
        parray = cm.arrays_to_bin[0]
        np = parray.get_number_of_particles()
        nrp = parray.num_real_particles
        # 25 real particles plus 10 remote copies received from rank 0
        assert nrp == 25
        assert np == 35
        local = parray.get("local", only_real_particles=False)
        rpi = cm.remote_particle_indices[0][0]
        assert rpi[0] == nrp
        assert rpi[1] == np
        for i in range(np):
            if i >= nrp:
                assert local[i] == 0
            else:
                assert local[i] == 1
        # test the update of remote particle indices
        print "Testing Update Remote Particle Properties on processor 1"
        # change some local property say 'p' and 'rho' to -1
        pa = cm.arrays_to_bin[0]
        p = pa.get('p', only_real_particles=False)
        rho = pa.get('rho', only_real_particles=False)
        p[:nrp] = -1
        rho[:nrp] = -1
        for i in range(np):
            if i >= nrp:
                assert p[i] != -1
                assert rho[i] != -1
        # tell rank 0 our values are set, then pull its values in turn
        mine_is_set = True
        comm.send(obj=mine_is_set, dest=0)
        cm.update_remote_particle_properties([['p','rho']])
        p = pa.get('p', only_real_particles=False)
        rho = pa.get('rho', only_real_particles=False)
        for i in range(np):
            if i >= nrp:
                assert p[i] == -1
                assert rho[i] == -1
        cm.remove_remote_particles()
        np = pa.get_number_of_particles()
        assert np == 25
        cm.cells_update()
        np = pa.get_number_of_particles()
        nrp = pa.num_real_particles
        # 6 particles migrated in from rank 0's move: 25 + 6 = 31 real
        assert np == 35
        assert nrp == 31
        # now move particles in cell (2,1,0) to cell (1, 2, 0)
        x, y = pa.get('x', 'y')
        cell_210 = cm.cells_dict.get(base.IntPoint(2,1,0))
        index_lists = []
        cell_210.get_particle_ids(index_lists)
        index_array = index_lists[0].get_npy_array()
        for i in index_array:
            y[i] += 0.5
            x[i] -= 0.5
        # now call a cells update
        cm.cells_update()
        np = pa.get_number_of_particles()
        nrp = pa.num_real_particles
        assert nrp == 31 - 6
        assert np == nrp
| Python |
""" Tests for the parallel cell manager """
import pysph.base.api as base
import pysph.parallel.api as parallel
import numpy
import time
import pdb
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
pid = comm.Get_rank()
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
xc = numpy.arange(0,1.0, 0.2)
x, y = numpy.meshgrid(xc,xc)
x = x = x.ravel()
y = y = y.ravel()
h = h = numpy.ones_like(x) * 0.25
dx = dy = 0.2
dx = dx
block_size = 0.5
cell_size = 0.5
block_000_indices = 0,1,2,5,6,7,10,11,12
block_100_indices = 3,4,8,9,13,14
block_010_indices = 15,16,17,20,21,22
block_110_indices = 18,19,23,24
name = "rank" + str(pid)
pa = base.get_particle_array(name="test", x=x, y=y, h=h)
pa.x += 1.0*pid
pa.x += 1e-10
# create the cell manager
cm = cm = parallel.ParallelCellManager(arrays_to_bin=[pa,],
max_radius_scale=2.0,
dimension=2.0,
load_balancing=False,
initialize=False,
min_cell_size=0.5)
# find global min and max
cm.update_global_properties()
# compute block size
cm.compute_block_size(0.5)
# compute cell size
cm.compute_cell_size(0,0)
# setup array indices.
cm.py_rebuild_array_indices()
# setup the cells_dict
cm.py_setup_cells_dict()
# setup information for the processor map.
cm.setup_processor_map()
# build a single cell with all the particles
cm._build_cell()
cells_dict = cm.cells_dict
proc_map = cm.proc_map
# Test the initial setup
assert len(cells_dict) == 1, "At this stage only the base cell should exist"
cell = cells_dict.values()[0]
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
index_array.sort()
# check the indices
#cid = cells_dict.keys()[0]
#assert (cid.x, cid.y, cid.z) == [(0,0,0),(2,0,0)][pid]
for i in range(25):
assert index_array[i] == i
# test the block size for the processor map
assert proc_map.block_size == 0.5
print "Checking cells_update"
# check bin_particles
print "Testing bin particles: new_block_cells, remote_block_cells"
new_block_cells, remote_block_cells = cm.bin_particles()
# the local and global proc_map should be empty
assert len(proc_map.local_block_map) == 0
assert len(proc_map.block_map) == 0
# the remote block cells should be empty
assert len(remote_block_cells) == 0
# there should be four new block cells
bid1 = base.IntPoint(0+2*pid,0,0)
bid2 = base.IntPoint(1+2*pid,0,0)
bid3 = base.IntPoint(1+2*pid,1,0)
bid4 = base.IntPoint(0+2*pid,1,0)
print new_block_cells
assert new_block_cells.has_key(bid1)
assert new_block_cells.has_key(bid2)
assert new_block_cells.has_key(bid3)
assert new_block_cells.has_key(bid4)
# the cells dict should be empty as well at this point
assert len(cells_dict) == 0
# test the particle copies for the new blocks
print "Testing create_new_particle_copies"
new_block_particles = cm.create_new_particle_copies(new_block_cells,
False)
assert len(new_block_particles) == 4
# check particles in bid 0,0,0
parray_list = new_block_particles.get(bid1)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_000_indices)
# check particles in bid 1,0,0
parray_list = new_block_particles.get(bid2)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_100_indices)
# check particles in bid 1,1,0
parray_list = new_block_particles.get(bid3)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_110_indices)
# check particles in bid 0,1,0
parray_list = new_block_particles.get(bid4)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_010_indices)
print "Testing assign_new_blocks: proc_map"
# assign the new blocks to the processor map
cm.assign_new_blocks(new_block_cells)
# check the processor map
assert len(cm.proc_map.local_block_map) == 4
assert len(cm.proc_map.block_map) == 4
assert cm.proc_map.nbr_procs == [pid]
# compute cell size
cm.compute_cell_size()
assert cm.cell_size == 0.5
# ensure all particles are local (!=0)
pa = cm.arrays_to_bin[0]
local = pa.get("local", only_real_particles=False)
for i in range(pa.get_number_of_particles()):
assert local[i] != 0
print "Testing rebin particles"
# rebin particles
cm.rebin_particles()
# now check the cells_dict
cells_dict = cm.cells_dict
assert len(cells_dict) == 4
# check the particles in the cells
cids = [base.IntPoint(0+2*pid,0,0), base.IntPoint(1+2*pid,0,0),
base.IntPoint(1+2*pid,1,0), base.IntPoint(0+2*pid,1,0)]
index_map = [block_000_indices, block_100_indices,
block_110_indices, block_010_indices]
for i in range(4):
cid = cids[i]
cell = cells_dict.get(cid)
index_lists = []
cell.get_particle_ids(index_lists)
cell_indices = index_lists[0].get_npy_array()
cell_indices.sort()
assert list(cell_indices) == list(index_map[i])
print "Testing glb_update_proc_map"
# update the global processor map
cm.remove_remote_particles()
cm.delete_empty_cells()
cm.proc_map.glb_update_proc_map(cm.cells_dict)
recv_particles = cm.proc_map.resolve_procmap_conflicts({})
cm.add_entering_particles_from_neighbors(recv_particles)
cm.remove_remote_particles()
# check the processor maps
print "Processor", pid, "Block Maps"
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
print "Testing Neighbors", pid
assert cm.proc_map.nbr_procs == [i for i in (pid-1, pid, pid+1) if i>=0 and i<num_procs]
# exchange neighbor particles
cm.exchange_neighbor_particles()
print "Testing Exchange Neighbor Particles"
print "Cells Dict For Processor", pid, "After Exchange\n"
for cid, cell in cells_dict.iteritems():
print cid, "np = ", cell.get_number_of_particles()
#print_yours=True
#comm.send(obj=print_yours, dest=1)
print "Testing remote particle indices on Processor", pid
parray = cm.arrays_to_bin[0]
np = parray.get_number_of_particles()
nrp = parray.num_real_particles
assert nrp == 25, "nrp=%r"%nrp
assert np == nrp + 15*(pid<num_procs-1)+10*(pid>0), "np=%r != %r"%(np,
nrp + 15*(pid<num_procs)+10*(pid>0))
local = parray.get("local", only_real_particles=False)
for i in proc_map.nbr_procs:
if i == pid: continue
rpi = cm.remote_particle_indices[i][0]
print pid, cm.remote_particle_indices
r = nrp + 10*(pid<i and pid>0)
assert rpi[0] == r, "%r,%r, rpi[0]=%r, r=%r"%(i,pid, rpi[0], r)
r = r + 10 + 5*(pid<i)
assert rpi[1] == r, "%r,%r, rpi[1]=%r != %r"%(i,pid, rpi[1], r)
for i in range(np):
if i >= nrp:
assert local[i] == 0
else:
assert local[i] == 1
# test the update of remote particle indices
print "Testing Update Remote Particle Properties on processor", pid
# change the local property say 'p' and 'rho' to -1
pa = cm.arrays_to_bin[0]
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
p[:nrp] = -1
rho[:nrp] = -1
for i in range(np):
if i >= nrp:
assert p[i] != -1
assert rho[i] != -1
#yours_is_set = comm.recv(source=1)
#if yours_is_set:
cm.update_remote_particle_properties([['p','rho']])
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
for i in range(np):
if i >= nrp:
assert p[i] == -1
assert rho[i] == -1
#####################################################################
# SECOND ITERATION
#####################################################################
# test the configuration
cids = [base.IntPoint(0+pid*2,0,0), base.IntPoint(1+pid*2,0,0),
base.IntPoint(1+pid*2,1,0), base.IntPoint(0+pid*2,1,0)]
for nbr in proc_map.nbr_procs:
if nbr == pid: continue
if nbr < pid:
cids.append(base.IntPoint(-1+pid*2,0,0))
cids.append(base.IntPoint(-1+pid*2,1,0))
elif nbr > pid:
cids.append(base.IntPoint(2+pid*2,0,0))
cids.append(base.IntPoint(2+pid*2,1,0))
pa = cm.arrays_to_bin[0]
for cid in cids:
assert cm.cells_dict.has_key(cid), "%r %r"%(pid, cid)
if cid in [base.IntPoint(2-pid,0,0), base.IntPoint(2-pid,1,0)]:
cell = cells_dict.get(cid)
index_lists = []
cell.get_particle_ids(index_lists)
parray = pa.extract_particles(index_lists[0])
local = parray.get('local', only_real_particles=False)
for val in local:
assert val == 0
# remove non local particles
cm.remove_remote_particles()
cm.delete_empty_cells()
np = pa.get_number_of_particles()
assert np == 25
# move 6 particles in cell/block (1,0,0) to (2,0,0) in pid=0
x = pa.get('x')
if pid == 0:
for i in block_100_indices:
x[i] += 0.5
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
cm.cells_update()
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
nprt = comm.bcast(comm.reduce(npr))
assert nprt==25*num_procs
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
print cm.cells_dict.values()
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
print pid, np, nrp
if num_procs == 2:
assert np == [40,35][pid], '%r, %r'%(pid, np)
assert nrp == [19,31][pid], '%r, %r'%(pid, np)
# now move the 4 particles in cell/block (1,1,0) to block/cell (1,2,0) in pid=0
# and particles in cell (2,1,0) to cell (1, 2, 0) in pid=1
x, y = pa.get('x', 'y')
if num_procs == 2:
cell = cm.cells_dict.get(base.IntPoint([1,2][pid],1,0))
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
print index_array
for i in index_array:
y[i] += 0.5
if pid == 1:
x[i] -= 0.5
# now call a cells update
cm.cells_update()
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
print cm.cells_dict.values()
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
nprt = comm.bcast(comm.reduce(npr))
assert nprt==25*num_procs
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
print pid, np
if num_procs == 2:
assert nrp == [19 + 6, 31 - 6][pid]
#assert np == nrp + 10
#assert np == [np, 41][pid]
assert np == nrp, "%r, %r!=%r"%(pid,np, nrp)
cells_nps = {base.IntPoint(0,0,0):9,
base.IntPoint(2,0,0):15,
base.IntPoint(3,0,0):6,
base.IntPoint(0,1,0):6,
base.IntPoint(3,1,0):4,
base.IntPoint(1,2,0):10,
}
print cm.proc_map.block_map
for cid, cell in cm.cells_dict.iteritems():
print cid, cell, cell.get_number_of_particles()
assert cell.get_number_of_particles() == cells_nps[cid], '%r'%(cell)
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
assert comm.bcast(comm.reduce(npr)) == 50
assert nrp == [19 + 6, 31 - 6][pid]
cm.cells_update()
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
assert comm.bcast(comm.reduce(npr)) == 50
assert nrp == [19 + 6, 31 - 6][pid]
#print cm.proc_map.nbr_procs
| Python |
"""
Simple script to check if the load balancing works on 2-d data.
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
# logging setup
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
filemode='w')
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import RectangleGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
pcm = ParallelCellManager(initialize=False, dimension=2)
parray = ParticleArray(name='parray')
if rank == 0:
lg = RectangleGenerator(particle_spacing_x1=0.1,
particle_spacing_x2=0.1)
x, y, z = lg.get_coords()
parray.add_property({'name':'x', 'data':x})
parray.add_property({'name':'y', 'data':y})
parray.add_property({'name':'z', 'data':z})
parray.add_property({'name':'h'})
parray.align_particles()
parray.h[:] = 0.1
else:
parray.add_property({'name':'x'})
parray.add_property({'name':'y'})
parray.add_property({'name':'z'})
parray.add_property({'name':'h'})
pcm.add_array_to_bin(parray)
pcm.initialize()
| Python |
"""
Simple script to check if copies of remote data are properly done.
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi import
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
if num_procs > 3:
raise SystemError, 'Start this script with 3 processors'
rank = comm.Get_rank()
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
filemode='w')
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
pcm = ParallelCellManager(initialize=False)
# create two particles, one with proc 0 another with proc 1
if rank == 0:
parray = ParticleArray()
parray.add_property({'name':'x', 'data':[0.4]})
parray.add_property({'name':'h', 'data':[0.1]})
elif rank == 1:
parray = ParticleArray()
parray.add_property({'name':'x', 'data':[1.2]})
parray.add_property({'name':'h', 'data':[0.1]})
elif rank == 2:
parray = ParticleArray()
parray.add_property({'name':'x', 'data':[2.0]})
parray.add_property({'name':'h', 'data':[0.1]})
parray.add_property({'name':'y'})
parray.add_property({'name':'z'})
parray.add_property({'name':'t'})
parray.align_particles()
logger.debug('%s, %s'%(parray.x, parray.t))
pcm.add_array_to_bin(parray)
pcm.initialize()
# set the 't' property in proc 0 to -1000 and proc 1 to 1000.
if rank == 0:
parray.t[0] = 1000.
if rank == 1:
parray.t[0] = 2000.
if rank == 2:
parray.t[0] = 3000.
# get remote data.
pcm.update_remote_particle_properties([['t']])
logger.debug('t is %s'%(parray.get('t', only_real_particles=False)))
| Python |
""" Test the share_data function for various cases
cases to run are chosen based on the size of the MPI.COMM_wORLD
case 1: for 5 processes
Processors arrangement:
4
0 1 2 3
Nbr lists:
0: 1,4
1: 0,2,4
2: 1,3,4
3: 2
4: 0,1,2
case 2: for 2 processes
both neighbors of each other
case 3,4,5: n processes (n>1 for case 5)
all neighbors of each other
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
from pysph.parallel.parallel_cell import share_data
def case1(multi=True, to_self=False):
    """Exercise share_data on the fixed 5-process topology.

    Each process sends the tuple (rank, nbr) to every neighbor and
    verifies that what arrives from neighbor `pid` is (pid, rank).
    Does nothing unless exactly 5 processes are running.
    """
    if num_procs != 5:
        return
    topology = [[1, 4],
                [0, 2, 4],
                [1, 3, 4],
                [2],
                [0, 1, 2]]
    nbrs = topology[rank]
    if to_self:
        nbrs.append(rank)
    send = dict((nbr, (rank, nbr)) for nbr in nbrs)
    recv_data = share_data(rank, nbrs, send, comm, multi=multi)
    # exactly one message per neighbor
    assert len(recv_data) == len(nbrs)
    if multi:
        for pid, data in recv_data.iteritems():
            assert data == (pid, rank)
    else:
        for pid, data in recv_data.iteritems():
            for pid2, data2 in data.iteritems():
                assert data2 == (pid, pid2)
def case2():
""" 2 processes """
if num_procs != 2: return
nbr_list = [1-rank]
proc_data = {1-rank:(rank, 1-rank)}
recv_data = share_data(rank, nbr_list, proc_data, comm, multi=True)
print rank, recv_data
def case3(multi=True, to_self=False):
    """ all-to-all communication

    Every process sends (rank, nbr) to every other process (and to
    itself when to_self is True) and verifies the received tuples.
    """
    nbr_list = range(num_procs)
    if not to_self: nbr_list.remove(rank)
    proc_data = {}
    for nbr in nbr_list:
        proc_data[nbr] = (rank, nbr)
    recv_data = share_data(rank, nbr_list, proc_data, comm, multi=multi)
    # one message expected from each neighbor
    assert len(recv_data) == len(nbr_list)
    if multi:
        for pid,data in recv_data.iteritems():
            assert data == (pid, rank)
    else:
        print rank, recv_data
        for pid,data in recv_data.iteritems():
            for pid2,data2 in data.iteritems():
                assert data2 == (pid, pid2)
def case4(multi=True, to_self=False):
    """ all-to-all oneway communication

    NOTE(review): the body is byte-identical to case3 -- despite the
    "oneway" docstring, no explicit recv_procs list is passed to
    share_data (compare case5).  Confirm whether this was intended.
    """
    nbr_list = range(num_procs)
    if not to_self: nbr_list.remove(rank)
    proc_data = {}
    for nbr in nbr_list:
        proc_data[nbr] = (rank, nbr)
    recv_data = share_data(rank, nbr_list, proc_data, comm, multi=multi)
    # one message expected from each neighbor
    assert len(recv_data) == len(nbr_list)
    if multi:
        for pid,data in recv_data.iteritems():
            assert data == (pid, rank)
    else:
        print rank, recv_data
        for pid,data in recv_data.iteritems():
            for pid2,data2 in data.iteritems():
                assert data2 == (pid, pid2)
def case5(multi=True, to_self=False):
    """ oneway communication to next two consecutive procs

    Each process sends to ranks (rank+1) and (rank+2) modulo num_procs
    and receives from (rank-1) and (rank-2); the receive list is passed
    explicitly via recv_procs.  Needs more than one process.

    NOTE(review): the `to_self` argument is accepted but never used
    in this case.
    """
    send_procs = [(rank+1)%num_procs, (rank+2)%num_procs]
    recv_procs = [(rank-1)%num_procs, (rank-2)%num_procs]
    proc_data = {}
    for nbr in send_procs:
        proc_data[nbr] = (rank, nbr)
    print rank, send_procs, recv_procs, proc_data
    recv_data = share_data(rank, send_procs, proc_data, comm, multi=multi,
                           recv_procs=recv_procs)
    # one message expected from each sending processor
    assert len(recv_data) == len(recv_procs)
    if multi:
        for pid,data in recv_data.iteritems():
            assert data == (pid, rank)
    else:
        print rank, recv_data
        for pid,data in recv_data.iteritems():
            for pid2,data2 in data.iteritems():
                assert data2 == (pid, pid2)
if __name__ == '__main__':
    # case2 requires exactly two processes
    if num_procs == 2:
        case2()
    # exercise the remaining cases over every (multi, to_self) combination
    for multi in True,False:
        for to_self in True,False:
            if num_procs == 5:
                case1(multi, to_self)
            case3(multi, to_self)
            case4(multi, to_self)
            # case5 needs at least two processes
            if num_procs > 1:
                case5(multi, to_self)
| Python |
""" Tests for the parallel cell manager """
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
import pysph.base.api as base
import pysph.parallel.api as parallel
from time import time
import numpy
import pylab
import pdb
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
pid = comm.Get_rank()
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
xc = numpy.arange(0,1.0, 0.2)
x, y = numpy.meshgrid(xc,xc)
x = x = x.ravel()
y = y = y.ravel()
h = h = numpy.ones_like(x) * 0.25
dx = dy = 0.2
dx = dx
block_size = 0.5
cell_size = 0.5
block_000_indices = 0,1,2,5,6,7,10,11,12
block_100_indices = 3,4,8,9,13,14
block_010_indices = 15,16,17,20,21,22
block_110_indices = 18,19,23,24
name = "rank" + str(pid)
pa = base.get_particle_array(name="test", x=x, y=y, h=h)
pa.x += 1.0*pid
pa.x += 1e-10
pa.y += 1.0*(pid%2)
pa.y += 1e-10
# create the cell manager
cm = parallel.ParallelCellManager(arrays_to_bin=[pa,],
max_radius_scale=2.0,
dimension=2.0,
load_balancing=False,
initialize=False,
min_cell_size=0.5)
t = time()
cm.initialize()
t = time() - t
print 'initialize time', t
cells_dict = cm.cells_dict
proc_map = cm.proc_map
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
t = time()
cm.cells_update()
t = time() - t
print 'cells_update time', t
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
print 'moving all but one blocks to proc 0'
t = time()
#send all but one block to proc=0
if pid > 0:
cm.transfer_blocks_to_procs({0:proc_map.local_block_map.keys()[1:]},
recv_procs=[])
else:
cm.transfer_blocks_to_procs({}, recv_procs=range(1,num_procs))
t = time() - t
print 'transfer_blocks time', t
t = time()
cm.delete_empty_cells()
cm.rebin_particles()
cm.cells_update()
t = time() - t
print 'cells_update time', t
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
t = time()
cm.load_balancer.load_balance(adaptive=True)
t = time() - t
print 'load_balance time', t
t = time()
#send all blocks to proc=0
if pid > 0:
cm.transfer_blocks_to_procs({0:proc_map.local_block_map.keys()},
recv_procs=[])
else:
cm.transfer_blocks_to_procs({}, recv_procs=range(1,num_procs))
t = time() - t
print 'transfer_blocks time', t
t = time()
cm.cells_update()
t = time() - t
print 'cells_update time', t
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
print 'testing load_balance'
t = time()
cm.load_balancer.load_balance(adaptive=True)
t = time() - t
print 'load_balance time', t
| Python |
"""
Simple script to check if the load balancing works on 1-d data.
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
# logging setup
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
filemode='w')
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
pcm = ParallelCellManager(initialize=False, dimension=1)
parray = ParticleArray(name='parray')
if rank == 0:
lg = LineGenerator(start_point=Point(0, 0, 0),
end_point=Point(1.0, 0, 0),
particle_spacing=0.01)
x, y, z = lg.get_coords()
parray.add_property({'name':'x', 'data':x})
parray.add_property({'name':'y', 'data':y})
parray.add_property({'name':'z', 'data':z})
parray.add_property({'name':'h'})
parray.align_particles()
parray.h[:] = 0.01
else:
parray.add_property({'name':'x'})
parray.add_property({'name':'y'})
parray.add_property({'name':'z'})
parray.add_property({'name':'h'})
pcm.add_array_to_bin(parray)
pcm.initialize()
| Python |
""" Some checks for the parallel cell manager.
Run this script only with less than 5 processors.
example : mpiexec -n 2 python parallel_cell_check.py
"""
import time
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
#if num_procs > 4:
# raise SystemError, 'Start this script on less than 5 processors'
rank = comm.Get_rank()
# logging setup
import logging
logger = logging.getLogger()
#log_file_name = 'parallel_cell_check.log.'+str(rank)
#logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
# filemode='w')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
from pysph.parallel.load_balancer import LoadBalancer
from nose.plugins.attrib import attr
@attr(parallel=True)
def test():
    """Distribute a line of particles across processors, move one
    cell's particles on rank 0, and check that the global count of
    real particles is conserved through the parallel update."""
    pcm = ParallelCellManager(initialize=False)
    # generate particles on a line from x=0 to x=10 spaced 0.5
    lg = LineGenerator(particle_spacing=0.5)
    lg.start_point.x = 0.0
    lg.end_point.x = 10.0
    lg.start_point.y = lg.start_point.z = 0.0
    lg.end_point.y = lg.end_point.z = 0.0
    x, y, z = lg.get_coords()
    num_particles = len(x)
    logger.info('Num particles : %d'%(len(x)))
    parray = ParticleArray(name='p1',
                           x={'data':x},
                           y={'data':y},
                           z={'data':z},
                           h={'data':None, 'default':0.5})
    # add parray to the cell manager
    parray.add_property({'name':'u'})
    parray.add_property({'name':'v'})
    parray.add_property({'name':'w'})
    parray.add_property({'name':'rho'})
    parray.add_property({'name':'p'})
    # split the particles evenly across all processors; keep our share
    parray = LoadBalancer.distribute_particles(parray, num_procs, 1.0)[rank]
    pcm.add_array_to_bin(parray)
    # global real-particle count must be conserved by the distribution
    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles
    pcm.initialize()
    # ... and by initialization
    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles
    pcm.set_jump_tolerance(INT_INF())
    logger.debug('%d: num_cells=%d'%(rank,len(pcm.cells_dict)))
    logger.debug('%d:'%rank + ('\n%d '%rank).join([str(c) for c in pcm.cells_dict.values()]))
    # on processor 0 move all particles from one of its cell to the next cell
    if rank == 0:
        cell = pcm.cells_dict.get(list(pcm.proc_map.cell_map.values()[0])[0])
        logger.debug('Cell is %s'%(cell))
        indices = []
        cell.get_particle_ids(indices)
        indices = indices[0]
        logger.debug('Num particles in Cell is %d'%(indices.length))
        parr = cell.arrays_to_bin[0]
        x, y, z = parr.get('x', 'y', 'z', only_real_particles=False)
        logger.debug(str(len(x)) + str(x))
        logger.debug(str(indices.length) + str(indices.get_npy_array()))
        # shift each selected particle by one cell width in x
        for i in range(indices.length):
            x[indices[i]] += cell.cell_size
        parr.set_dirty(True)
    pcm.update_status()
    logger.debug('Calling cell manager update')
    logger.debug('Is dirty %s'%(pcm.is_dirty))
    pcm.update()
    # the count must still be conserved after the parallel update
    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles
    #logger.debug('hierarchy :%s'%(pcm.hierarchy_list))
    logger.debug('cells : %s'%(pcm.cells_dict))
    logger.debug('num particles : %d'%(parray.get_number_of_particles()))
    logger.debug('real particles : %d'%(parray.num_real_particles))
| Python |
""" Tests for the parallel cell manager """
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
import pysph.base.api as base
import pysph.parallel.api as parallel
from time import time
import numpy
import pylab
import pdb
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
pid = comm.Get_rank()
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
xc = numpy.arange(0,1.0, 0.2)
x, y = numpy.meshgrid(xc,xc)
x = x = x.ravel()
y = y = y.ravel()
h = h = numpy.ones_like(x) * 0.25
dx = dy = 0.2
dx = dx
block_size = 0.5
cell_size = 0.5
block_000_indices = 0,1,2,5,6,7,10,11,12
block_100_indices = 3,4,8,9,13,14
block_010_indices = 15,16,17,20,21,22
block_110_indices = 18,19,23,24
name = "rank" + str(pid)
pa = base.get_particle_array(name="test", x=x, y=y, h=h)
pa.x += 1.0*pid
pa.x += 1e-10
pa.y += 1.0*(pid%2)
pa.y += 1e-10
# create the cell manager
cm = parallel.ParallelCellManager(arrays_to_bin=[pa,],
max_radius_scale=2.0,
dimension=2.0,
load_balancing=False,
initialize=False,
min_cell_size=0.5)
t = time()
cm.initialize()
t = time() - t
print 'initialize time', t
cells_dict = cm.cells_dict
proc_map = cm.proc_map
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
t = time()
cm.cells_update()
t = time() - t
print 'cells_update time', t
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
print 'moving all but one blocks to proc 0'
t = time()
#send all but one block to proc=0
if pid > 0:
cm.transfer_blocks_to_procs({0:proc_map.local_block_map.keys()[1:]},
recv_procs=[])
else:
cm.transfer_blocks_to_procs({}, recv_procs=range(1,num_procs))
t = time() - t
print 'transfer_blocks time', t
t = time()
cm.delete_empty_cells()
cm.rebin_particles()
cm.cells_update()
t = time() - t
print 'cells_update time', t
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
t = time()
cm.load_balancer.load_balance()
t = time() - t
print 'load_balance time', t
t = time()
#send all blocks to proc=0
if pid > 0:
cm.transfer_blocks_to_procs({0:proc_map.local_block_map.keys()},
recv_procs=[])
else:
cm.transfer_blocks_to_procs({}, recv_procs=range(1,num_procs))
t = time() - t
print 'transfer_blocks time', t
t = time()
cm.cells_update()
t = time() - t
print 'cells_update time', t
print 'cells_dict'
print cells_dict
print
print 'block_map'
print proc_map.block_map
print 'load_per_proc'
print proc_map.load_per_proc
print 'testing load_balance'
t = time()
cm.load_balancer.load_balance()
t = time() - t
print 'load_balance time', t
| Python |
#!/bin/env python
"""
Simple test for checking if the control tree is setup properly.

Run this script with the following command:
mpiexec -n [num_procs] python controller_check.py
"""
try:
    import mpi4py.MPI as mpi
except ImportError:
    import nose.plugins.skip as skip
    reason = "mpi4py not installed"
    raise skip.SkipTest(reason)
# logging setup: shared log file, appended, also echoed to stderr
import logging
logging.basicConfig(level=logging.DEBUG, filename='/tmp/log_pysph', filemode='a')
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
from mpi4py import MPI
from pysph.parallel.parallel_controller import ParallelController
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
logger.info('(%d)================controller_check====================='%(rank))
p = ParallelController()
assert p.rank == rank
# the asserts below encode a binary tree rooted at rank 0:
# parent(r) = r/2 - 1 for even r, (r-1)/2 for odd r, and the children
# of r are 2r+1 and 2r+2; -1 marks a missing parent/child
if rank == 0:
    assert p.parent_rank == -1
else:
    if rank % 2 == 0:
        assert p.parent_rank == ((rank)/2 -1)
    else:
        assert p.parent_rank == ((rank-1)/2)
# a child rank is absent (-1) when it would be >= num_procs
if num_procs <= 2*rank + 1:
    assert p.l_child_rank == -1
    assert p.r_child_rank == -1
elif num_procs <= 2*rank + 2:
    assert p.l_child_rank == 2*rank + 1
    assert p.r_child_rank == -1
else:
    assert p.l_child_rank == 2*rank + 1
    assert p.r_child_rank == 2*rank + 2
logger.info('(%d)================controller_check====================='%(rank))
| Python |
"""A parallel manager that uses blocks to partition the domain. At
every iteration, the particles are placed in large bins and these bins
are exchanged across processors.
"""
from parallel_controller import ParallelController
from parallel_manager import ParallelManager
from parallel_cell import share_data
from pysph.base.fast_utils import arange_long
from pysph.base.particle_array import ParticleArray, get_dummy_tag
from pysph.base.cell import py_construct_immediate_neighbor_list
from pysph.base.cell import CellManager
import numpy
# logger imports
import logging
logger = logging.getLogger()
class ProcessorMap(object):
    """The ProcessorMap determines neighboring processors and a list
    of cells to send to each processor.

    The main data used by the ProcessorMap is the `cells_dict`
    corresponding to each processor's local binning. The cell
    information is used to construct these dictionaries:

    local_cell_map : A dictionary keyed on cell id, with value a set
    containing the local processor rank that created this cell.

    global_cell_map : A dictionary keyed on cell id, with value a
    set of processor ranks that created this cell.

    Two processors may own the same region in space and no attempt is
    made to resolve this conflict. A suitable subclass may provide a
    mechanism to do so.
    """
    def __init__(self, parallel_controller=None):
        """Constructor.

        Parameters:
        -----------
        parallel_controller : pysph.base.parallel.ParallelController
            The controller object which manages the child and
            parent processor ranks required for a global update.
            A default controller is created when None is passed.
        """
        self.parallel_controller = parallel_controller
        if parallel_controller is None:
            self.parallel_controller = ParallelController()
        self.rank = self.parallel_controller.rank
        self.comm = self.parallel_controller.comm
        # cell id -> set of ranks; see the class docstring
        self.local_cell_map = {}
        self.global_cell_map = {}
        # reserved for subclasses that resolve overlapping ownership
        self.conflicts = {}
    def _local_update(self, cells_dict):
        """Update the local cell map.

        Both `local_cell_map` and `global_cell_map` are rebuilt with an
        entry per local cell id, each holding a set containing only the
        local processor's rank.
        """
        self.local_cell_map = {}
        self.global_cell_map = {}
        for cid, cell in cells_dict.iteritems():
            self.local_cell_map[cid] = set( [self.rank] )
            self.global_cell_map[cid] = set( [self.rank] )
    def global_update(self, cells_dict):
        """Update the global cell map.

        The local cell maps from all processors are passed through the
        tree and updated at each stage. After a call to this function,
        every processor has the same global cell map.

        The global cell map is keyed on cell id with value a set of
        processor ranks that created this cell.
        """
        self._local_update(cells_dict)
        self.conflicts = {}
        pc = self.parallel_controller
        comm = self.comm
        # merge data from all children proc maps.
        for c_rank in pc.children_proc_ranks:
            c_cell_map = comm.recv(source=c_rank)
            # merge the data
            for cid in c_cell_map:
                if cid in self.global_cell_map:
                    self.global_cell_map[cid].update( c_cell_map[cid] )
                else:
                    self.global_cell_map[cid] = c_cell_map[cid]
        # we now have partially merged data, send it to parent if not root.
        if pc.parent_rank > -1:
            comm.send(self.global_cell_map, dest=pc.parent_rank)
            # receive the fully merged map back from the parent
            updated_cell_map = comm.recv(source=pc.parent_rank)
            # update the global cell map
            self.global_cell_map.clear()
            self.global_cell_map.update( updated_cell_map )
        # send updated data to children.
        for c_rank in pc.children_proc_ranks:
            comm.send(self.global_cell_map, dest=c_rank)
    def get_cell_list_to_send(self):
        """Return a list of cells to send to each processor.

        Neighboring cells are determined allowing for cells to be
        shared across processors. The return value is a dictionary
        keyed on processor id with value equal to the set of cell ids
        to send that processor.
        """
        local_map = self.local_cell_map
        global_map = self.global_cell_map
        pc = self.parallel_controller
        cell_list_to_send = {}
        for cid in local_map:
            neighbor_ids = []
            py_construct_immediate_neighbor_list(cid, neighbor_ids,
                                                 include_self=False)
            # handle non-overlapping regions: send this cell to every
            # processor owning one of its immediate neighbors
            for neighbor_id in neighbor_ids:
                if neighbor_id in global_map:
                    owning_pids = list(global_map[neighbor_id])
                    for pid in owning_pids:
                        if not pid in cell_list_to_send:
                            cell_list_to_send[pid] = set([cid])
                        else:
                            cell_list_to_send[pid].update([cid])
            # handle overlapping regions: send this cell to the other
            # owners of the same cell id (the set also contains our own
            # rank, so the length test below is effectively always true)
            conflicting_pids = list(global_map[cid])
            if len(conflicting_pids) > 0:
                for neighbor_id in neighbor_ids:
                    if neighbor_id in local_map:
                        for pid in conflicting_pids:
                            if not pid in cell_list_to_send:
                                cell_list_to_send[pid] = set([cid])
                            else:
                                cell_list_to_send[pid].update([cid])
        return cell_list_to_send
    def resolve_conflicts(self):
        """Placeholder: subclasses may resolve overlapping cell ownership."""
        pass
class SimpleBlockManager(ParallelManager):
    """A parallel manager based on blocks.

    Particles are binned locally with a bin/cell size equal to some
    factor times the maximum smoothing length of the particles. The
    resulting cell structure is used to determine neighboring
    processors using the ProcessorMap and only a single layer of cells
    is communicated.
    """
def __init__(self, block_scale_factor=6.0):
    """Constructor.

    Parameters:
    -----------
    block_scale_factor : double
        The scale factor used to determine the bin size.  The block
        size is chosen as: block_scale_factor * glb_max_h
        The block_scale_factor should be greater than or equal to the
        largest kernel radius for all possibly different kernels used
        in a simulation.
    """
    self.parallel_controller = ParallelController()
    self.processor_map = ProcessorMap(self.parallel_controller)
    self.block_scale_factor = block_scale_factor
    self.comm = self.parallel_controller.comm
    self.size = self.parallel_controller.num_procs
    # rank of this processor (the original assigned this twice)
    self.rank = self.parallel_controller.rank
    # global and local coordinate bounds and smoothing-length extrema,
    # filled in by _update_global_properties()
    self.glb_bounds_min = [0, 0, 0]
    self.glb_bounds_max = [0, 0, 0]
    self.glb_min_h = 0
    self.glb_max_h = 0
    self.local_bounds_min = [0, 0, 0]
    self.local_bounds_max = [0, 0, 0]
    self.local_min_h = 0
    self.local_max_h = 0
    self.local_cell_map = {}
    self.global_cell_map = {}
##########################################################################
# Public interface
##########################################################################
def initialize(self, particles):
    """Initialize the block manager.

    The particle arrays are set and the cell manager is created
    after the cell/block size is computed.

    Parameters:
    -----------
    particles : object exposing an `arrays` attribute
        Holds the particle arrays this manager will bin.
    """
    self.particles = particles
    self.arrays = particles.arrays
    # setup the cell manager: mark the arrays dirty, compute the block
    # size from the global max smoothing length, then create the bins
    self._set_dirty()
    self._compute_block_size()
    self._setup_cell_manager()
def update(self):
    """Parallel update.

    After a call to this function, each processor has it's local
    and remote particles necessary for a simulation.
    """
    cm = self.cm
    pmap = self.processor_map
    # remove all remote particles
    self._remove_remote_particles()
    # bin the particles with a freshly computed block size
    self._rebin_particles()
    # update the global cell map across all processors
    pmap.global_update(cm.cells_dict)
    # stamp each array with this processor's rank
    self._set_array_pid()
    # exchange the boundary-cell particles with neighbors
    self._exchange_neighbor_particles()
    # reset the arrays to dirty so locally we are unaffected
    self._set_dirty()
def update_remote_particle_properties(self, props):
    """Refresh remote particle data.

    NOTE(review): `props` is ignored -- a full parallel update is
    performed instead of copying only the requested properties.
    """
    self.update()
###########################################################################
# Non public interface
###########################################################################
def _add_neighbor_particles(self, data):
    """Append remote particles to the local arrays.

    Parameters:
    -----------
    data : dictionary
        A dictionary keyed on processor id with value equal to a list of
        particle arrays, corresponding to the local arrays in `arrays`
        that contain remote particles from that processor.

    Also records, per local array, the [start, end) index range now
    occupied by remote particles in `self.remote_particle_indices`.
    """
    arrays = self.arrays
    numarrays = len(arrays)
    remote_particle_indices = []
    for i in range(numarrays):
        num_local = arrays[i].get_number_of_particles()
        # remote particles start right after the existing particles
        remote_particle_indices.append( [num_local, num_local] )
    for pid in data:
        # skip data echoed back from ourselves
        if not pid == self.rank:
            parray_list = data[pid]
            for i in range(numarrays):
                src = parray_list[i]
                dst = arrays[i]
                remote_particle_indices[i][1] += src.get_number_of_particles()
                dst.append_parray(src)
    self.remote_particle_indices = remote_particle_indices
def _get_communication_data(self, cell_list_to_send):
    """Get the particle array data corresponding to the cell list
    that needs to be communicated.

    Parameters:
    -----------
    cell_list_to_send : dict
        A dictionary keyed on processor id with value the collection
        of cell ids whose particles must go to that processor.

    Returns a dictionary keyed on processor id with value a list of
    ParticleArrays (one per array in `self.arrays`) holding copies of
    the particles in the requested cells, marked as remote (local=0,
    dummy tag).

    The original implementation carried an unreachable second loop
    after the return statement that referenced undefined names
    (`send_cells_to`, `to_send`); it has been removed.
    """
    numarrays = len(self.arrays)
    cm = self.cm
    data = {}
    for pid, cell_list in cell_list_to_send.iteritems():
        # one empty destination array per managed array
        parray_list = []
        for i in range(numarrays):
            parray_list.append(ParticleArray())
        for cid in cell_list:
            cell = cm.cells_dict[cid]
            index_lists = []
            cell.get_particle_ids(index_lists)
            for i in range(numarrays):
                src = self.arrays[i]
                dst = parray_list[i]
                index_array = index_lists[i]
                pa = src.extract_particles(index_array)
                # mark the extracted copies as remote particles
                pa.local[:] = 0
                pa.tag[:] = get_dummy_tag()
                dst.append_parray(pa)
                dst.set_name(src.name)
        data[pid] = parray_list
    return data
def _exchange_neighbor_particles(self):
    """Send the cells to neighboring processors and append whatever
    is received as remote particles."""
    pc = self.parallel_controller
    pmap = self.processor_map
    cm = self.cm
    # get the list of cells to send per processor from the processor map
    cell_list_to_send = pmap.get_cell_list_to_send()
    self.cell_list_to_send = cell_list_to_send
    # get the actual particle data to send from the cell manager
    data = self._get_communication_data(cell_list_to_send)
    # share the data
    recv = share_data(self.rank, data.keys(), data, pc.comm, multi=True)
    # add the neighbor particles
    self._add_neighbor_particles(recv)
def _rebin_particles(self):
    """Recompute the local cell structure from scratch."""
    # mark every particle array as modified so binning is redone
    self._set_dirty()
    # refresh the block size from the global smoothing lengths
    self._compute_block_size()
    # re-bin all particles with the new cell size
    cell_manager = self.cm
    cell_manager.cell_size = self.block_size
    cell_manager.rebin_particles()
    # drop cells that ended up with no particles
    cell_manager.delete_empty_cells()
def _compute_block_size(self):
    """Set ``self.block_size``.

    The block size is the global largest smoothing length scaled by
    ``self.block_scale_factor``.
    """
    # refresh glb_max_h (and the other global extrema) first
    self._update_global_properties()
    scale = self.block_scale_factor
    self.block_size = scale * self.glb_max_h
def _setup_cell_manager(self):
    """Create the cell manager used for binning the particle arrays."""
    # min and max cell sizes are pinned to the block size so that a
    # single uniform cell size is used throughout
    size = self.block_size
    self.cm = CellManager(arrays_to_bin=self.arrays,
                          min_cell_size=size,
                          max_cell_size=size,
                          initialize=True)
def _set_dirty(self):
"""Set the dirty bit for each particle array."""
for array in self.arrays:
array.set_dirty(True)
def _remove_remote_particles(self):
    """Strip all remote (non-real) particles from every array."""
    for parray in self.arrays:
        # remote particles occupy the indices after the real ones
        first = parray.num_real_particles
        last = parray.get_number_of_particles()
        parray.remove_particles(arange_long(first, last))
def _set_array_pid(self):
"""Set the processor id for each particle array."""
for array in self.arrays:
array.set_pid(self.rank)
def _barrier(self):
"""Wait till all processors reach this point."""
self.parallel_controller.comm.barrier()
def _update_global_properties(self):
    """Exchange bounds and smoothing-length extrema among processors.

    Notes:
    ------
    After this call the global min and max values of the coordinates
    and smoothing lengths are stored in glb_bounds_min/max and
    glb_min/max_h; the local extrema are stored in the corresponding
    local_* attributes.
    """
    data_min = {'x':0, 'y':0, 'z':0, 'h':0}
    data_max = {'x':0, 'y':0, 'z':0, 'h':0}
    # local extrema of each property over all particle arrays
    for key in data_min.keys():
        lo, hi = self._find_min_max_of_property(key)
        data_min[key] = lo
        data_max[key] = hi
    # store the local coordinate bounds and smoothing-length extrema
    for axis, key in enumerate(('x', 'y', 'z')):
        self.local_bounds_min[axis] = data_min[key]
        self.local_bounds_max[axis] = data_max[key]
    self.local_min_h = data_min['h']
    self.local_max_h = data_max['h']
    # reduce to global extrema across all processors
    pc = self.parallel_controller
    glb_min, glb_max = pc.get_glb_min_max(data_min, data_max)
    for axis, key in enumerate(('x', 'y', 'z')):
        self.glb_bounds_min[axis] = glb_min[key]
        self.glb_bounds_max[axis] = glb_max[key]
    self.glb_min_h = glb_min['h']
    self.glb_max_h = glb_max['h']
    logger.info('(%d) bounds : %s %s'%(pc.rank, self.glb_bounds_min,
                                       self.glb_bounds_max))
    logger.info('(%d) min_h : %f, max_h : %f'%(pc.rank, self.glb_min_h,
                                               self.glb_max_h))
def _find_min_max_of_property(self, prop_name):
""" Find the minimum and maximum of the property among all arrays
Parameters:
-----------
prop_name -- the property name to find the bounds for
"""
min = 1e20
max = -1e20
num_particles = 0
for arr in self.arrays:
if arr.get_number_of_particles() == 0:
continue
else:
num_particles += arr.get_number_of_particles()
min_prop = numpy.min(arr.get(prop_name))
max_prop = numpy.max(arr.get(prop_name))
if min > min_prop:
min = min_prop
if max < max_prop:
max = max_prop
return min, max
| Python |
"""API module to simplify import of common names from pysph.parallel package"""
from parallel_cell import ParallelCellManager, ProcessorMap
| Python |
""" Contains class to perform load balancing using METIS[1]/SCOTCH[2]
[1] METIS: http://glaros.dtc.umn.edu/gkhome/views/metis
[2] SCOTCH: http://www.labri.fr/perso/pelegrin/scotch/
Note: Either of METIS/SCOTCH is acceptable. Installing one of these is enough.
First METIS is attempted to load and if it fails SCOTCH is tried. SCOTCH is
used in the METIS compatibility mode. Only the function `METIS_PartGraphKway`
is used from either of the libraries
"""
# logging imports
import logging
logger = logging.getLogger()
# local imports
from pysph.base.cell import py_construct_immediate_neighbor_list
from load_balancer_mkmeans import LoadBalancerMKMeans
import sys
import ctypes
from ctypes import c_int32 as c_int
# Locate a METIS-compatible shared library.  SCOTCH ships a METIS
# compatibility layer, so either installation works; METIS is tried
# first, then SCOTCH.
if sys.platform.startswith('linux'):
    try:
        libmetis = ctypes.cdll.LoadLibrary('libmetis.so')
    except OSError:
        try:
            libmetis = ctypes.cdll.LoadLibrary('libscotchmetis.so')
        except OSError:
            raise ImportError('could not load METIS library, try installing '
                    'METIS/SCOTCH and ensure it is in LD_LIBRARY_PATH')
elif sys.platform.startswith('win'):
    try:
        libmetis = ctypes.cdll.LoadLibrary('metis')
    except OSError:
        try:
            libmetis = ctypes.cdll.LoadLibrary('scotchmetis')
        except OSError:
            raise ImportError('could not load METIS library, try installing '
                    'METIS/SCOTCH and ensure it is in LD_LIBRARY_PATH')
else:
    # FIX: the two adjacent string literals previously concatenated
    # without a space, yielding "METIS/SCOTCHload_balancing"
    raise ImportError('sorry, donno how to use ctypes (for METIS/SCOTCH '
            'load_balancing) on non-linux/win platform, any help appreciated')

# the only METIS entry point used: k-way graph partitioning
METIS_PartGraphKway = libmetis.METIS_PartGraphKway
c_int_p = ctypes.POINTER(c_int)
METIS_PartGraphKway.argtypes = [c_int_p, c_int_p, c_int_p, c_int_p, c_int_p,
        c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
def cargs_from_wadj(xadj, adjncy, vwgt, bid_index, nparts):
    """Convert CSR adjacency data into ctypes arguments for METIS.

    Parameters:
    -----------
    - xadj,adjncy,vwgt: lists containing adjacency data in CSR format as
      required by :func:`METIS_PartGraphKway` (check METIS manual)
    - bid_index: dict mapping bid to index in the adjacency data
      (accepted for interface compatibility; not used here)
    - nparts: number of partitions to make of the graph

    Returns:
    --------
    The 11-tuple (n, xadj, adjncy, vwgt, adjwgt, wgtflag, numflag,
    nparts, options, edgecut, part) of arguments for
    :func:`METIS_PartGraphKway`, all as c_int32 arrays (or None).
    """
    num_vertices = len(xadj) - 1

    c_n = (c_int * 1)(num_vertices)
    c_numflag = (c_int * 1)()           # 0 => C-style (0-based) numbering
    c_adjwgt = None                     # no edge weights
    c_nparts = (c_int * 1)(nparts)
    c_options = (c_int * 5)()           # all zero => METIS defaults
    c_edgecut = (c_int * 1)()           # output: edge-cut of the partition
    c_part = (c_int * num_vertices)()   # output: partition id per vertex

    # copy the CSR arrays into ctypes buffers
    c_xadj = (c_int * (num_vertices + 1))(*xadj)
    c_adjncy = (c_int * len(adjncy))(*adjncy)

    if vwgt:
        # wgtflag == 2 => weights on vertices only (see METIS manual)
        c_vwgt = (c_int * num_vertices)(*vwgt)
        c_wgtflag = (c_int * 1)(2)
    else:
        c_vwgt = None
        c_wgtflag = (c_int * 1)(0)

    return (c_n, c_xadj, c_adjncy, c_vwgt, c_adjwgt, c_wgtflag, c_numflag,
            c_nparts, c_options, c_edgecut, c_part)
def wadj_from_adj_list(adj_list):
    """Build CSR adjacency data (xadj, adjncy, vwgt) plus a bid->index
    map from the adjacency list returned by :func:`adj_list_from_blocks`."""
    # first pass: assign a contiguous index to every block id, so that
    # neighbor references can be translated in the second pass
    bid_index = dict((entry[0], idx) for idx, entry in enumerate(adj_list))
    # second pass: emit the CSR arrays
    xadj = [0]
    adjncy = []
    vwgt = []
    for bid, neighbors, num_particles in adj_list:
        adjncy.extend(bid_index[nbr] for nbr in neighbors)
        xadj.append(len(adjncy))
        vwgt.append(num_particles)
    return xadj, adjncy, vwgt, bid_index
def adj_list_from_blocks(block_proc, proc_block_np):
    """Return adjacency list information for use by METIS partitioning.

    Arguments:
    ----------
    - block_proc: dict mapping bid:proc
    - proc_block_np: list of dicts mapping bid:np, one per process

    Returns:
    --------
    - adj_list: list of 3-tuples, one for each block in proc_block_np.
      Each 3-tuple is (bid, adjacent bids, num_particles in bid); only
      neighbors that exist as blocks (keys of block_proc) are kept.
    """
    # (removed the ``i`` counter that was incremented but never used)
    adj_list = []
    nbrs = []  # scratch list reused across iterations, filled in place
    for blocks in proc_block_np:
        for bid, np in blocks.iteritems():
            nbrs[:] = []
            py_construct_immediate_neighbor_list(bid, nbrs, False)
            # keep only neighbors that are actual blocks
            adjl = [nbr for nbr in nbrs if nbr in block_proc]
            adj_list.append((bid, adjl, np))
    return adj_list
def lb_metis(block_proc, proc_block_np):
    """Partition the blocks in proc_block_np using METIS.

    Arguments:
    ----------
    - block_proc: dict mapping bid:proc
    - proc_block_np: list of dicts mapping bid:np, one per process

    Returns:
    --------
    - dict mapping bid:proc for the new partitioning generated by METIS
    """
    adjacency = adj_list_from_blocks(block_proc, proc_block_np)
    xadj, adjncy, vwgt, bid_index = wadj_from_adj_list(adjacency)
    nparts = len(proc_block_np)
    metis_args = cargs_from_wadj(xadj, adjncy, vwgt, bid_index, nparts)
    METIS_PartGraphKway(*metis_args)
    # the last argument is the output partition array: partition[i] is
    # the processor assigned to the block with CSR index i
    partition = metis_args[-1]
    return dict((bid, partition[idx]) for bid, idx in bid_index.iteritems())
###############################################################################
# `LoadBalancerMetis` class.
###############################################################################
class LoadBalancerMetis(LoadBalancerMKMeans):
    """Load balancer that delegates the partitioning itself to METIS."""

    def __init__(self, **args):
        LoadBalancerMKMeans.__init__(self, **args)
        self.method = 'serial_metis'

    def load_balance_func_serial_metis(self, **args):
        """Serial load-balance entry point that uses METIS to do the
        partitioning; calls the :class:Loadbalancer
        :meth:`load_balance_func_serial`."""
        self.load_balance_func_serial('metis', **args)

    def load_redistr_metis(self, block_proc, proc_block_np, **args):
        """Redistribute the cells amongst processes using METIS.

        Called by :class:Loadbalancer :meth:`load_balance_func_serial`.
        Returns the new bid->proc mapping and the per-processor
        particle counts.
        """
        # let METIS compute a fresh bid -> proc assignment
        new_block_proc = lb_metis(block_proc, proc_block_np)
        # flatten the per-proc particle counts into one bid -> np map
        np_per_block = {}
        for proc_blocks in proc_block_np:
            np_per_block.update(proc_blocks)
        # tally the particles each processor ends up with
        self.particles_per_proc = [0] * len(proc_block_np)
        for bid, proc in new_block_proc.iteritems():
            self.particles_per_proc[proc] += np_per_block[bid]
        self.balancing_done = True
        return new_block_proc, self.particles_per_proc
###############################################################################
| Python |
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
# Skip this test module under nose when PyOpenCL is unavailable; if
# nose itself is missing, fall through silently and let later OpenCL
# usage fail on its own.
if solver.HAS_CL:
    import pyopencl as cl
else:
    try:
        import nose.plugins.skip as skip
        reason = "PyOpenCL not installed"
        raise skip.SkipTest(reason)
    except ImportError:
        # nose not installed -- cannot signal a skip
        pass

import numpy
import unittest
from os import path

# short aliases for the enum-like type holders used in the tests below
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
class FunctionTestCase(unittest.TestCase):
    """ Simple test for the NBodyForce """

    def runTest(self):
        # intentionally empty: the concrete comparisons live in _test,
        # which subclasses drive with a precision and tolerance
        pass

    def setUp(self):
        r""" The setup consists of four particles placed at the
        vertices of a unit square. The force function to be tested is:

        ..math::

                f_i = \sum_{j=1}^{4} \frac{m_j}{|x_j - x_i|^3 +
                \eps}(x_j - x_i)

        The mass of each particle is 1
        """
        # NOTE(review): docstring made raw -- in the original non-raw
        # string the "\f" of "\frac" was a literal form-feed escape.
        self.np = 4

        # define the particle properties here
        x = numpy.array([0, 0, 1, 1], numpy.float64)
        y = numpy.array([0, 1, 1, 0], numpy.float64)
        z = numpy.zeros_like(x)
        m = numpy.ones_like(x)

        u = numpy.array([1, 0, 0, -1], numpy.float64)
        p = numpy.array([0, 0, 1, 1], numpy.float64)

        self.kernel = base.CubicSplineKernel(dim=2)

        # create a ParticleArray with double precision
        self.pa = pa = base.get_particle_array(name="test", x=x, y=y, z=z,
                                               m=m, u=u, p=p)

        # create a particles instance
        self.particles = base.Particles([pa,])

        # OpenCL-side particle container using an all-pair locator
        self.cl_particles = base.CLParticles(
            arrays=[self.pa,],
            domain_manager_type=CLDomain.DomainManager,
            cl_locator_type=CLLocator.AllPairNeighborLocator)

        # define the function here
        # NOTE(review): self.func is never assigned here (line below is
        # commented out), but setup_calcs reads self.func -- presumably
        # subclasses assign it in their setup() override; confirm.
        #self.func = func = sph.NBodyForce.get_func(pa, pa)

        if solver.HAS_CL:
            self.ctx = ctx = solver.create_some_context()
            self.q = q = cl.CommandQueue(ctx)

        self.setup()

    def setup(self):
        # hook for subclasses to complete their setup (e.g. self.func)
        pass

    def get_reference_solution(self):
        """ Evaluate the force on each particle manually """
        # Define the reference solution here
        raise NotImplementedError

    def setup_calcs(self):
        # build a Cython calc and an OpenCL calc over the same data so
        # _test can compare their outputs
        pa = self.pa

        # create a Cython Calc
        calc = sph.SPHCalc( self.particles, [pa,], pa,
                            self.kernel, [self.func,], ['rho'] )
        self.calc = calc

        # create an OpenCL Calc
        cl_calc = sph.CLCalc( self.cl_particles, [pa,], pa,
                              self.kernel, [self.func,], ['rho'] )
        self.cl_calc = cl_calc

    def _test(self, precision, nd):
        """ Test the PySPH solution

        Parameters:
        -----------
        precision -- OpenCL float precision passed to
            pa.set_cl_precision (accepted values not visible here --
            TODO confirm against ParticleArray)
        nd -- number of decimal places for assertAlmostEqual
        """
        pa = self.pa
        pa.set_cl_precision(precision)

        # setup the calcs
        self.setup_calcs()

        # setup OpenCL
        self.cl_calc.setup_cl(self.ctx)

        # get the reference solution
        reference_solution = self.get_reference_solution()

        # run the Cython calc and keep copies of its output
        self.calc.sph()
        cython_tmpx = pa._tmpx.copy()
        cython_tmpy = pa._tmpy.copy()
        cython_tmpz = pa._tmpz.copy()

        # poison the output buffers so stale Cython results cannot be
        # mistaken for OpenCL output
        pa._tmpx[:] = -1
        pa._tmpy[:] = -1
        pa._tmpz[:] = -1

        # run the OpenCL calc and read its results back from the device
        self.cl_calc.sph()
        pa.read_from_buffer()

        opencl_tmpx = pa._tmpx
        opencl_tmpy = pa._tmpy
        opencl_tmpz = pa._tmpz

        # both backends must agree with the reference to nd decimals
        for i in range(self.np):
            self.assertAlmostEqual(reference_solution[i].x, cython_tmpx[i],nd)
            self.assertAlmostEqual(reference_solution[i].y, cython_tmpy[i],nd)
            self.assertAlmostEqual(reference_solution[i].z, cython_tmpz[i],nd)

            self.assertAlmostEqual(reference_solution[i].x, opencl_tmpx[i],nd)
            self.assertAlmostEqual(reference_solution[i].y, opencl_tmpy[i],nd)
            self.assertAlmostEqual(reference_solution[i].z, opencl_tmpz[i],nd)
| Python |
"""
Module containing some data required for tests of the sph module.
"""
# standard imports
import numpy
# local imports
from pysph.base.particle_array import *
def generate_sample_dataset_1():
    """
    Generate test data.

    A 3x3 grid of unit-mass, unit-density particles in the z=0 plane
    with h=1.1 and zero velocities.  Look at image sph_test_data1.png.
    """
    x = numpy.array([-1.0, 0.0, 1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 1.0])
    y = numpy.array([-1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
    z = numpy.array([0., 0, 0, 0, 0, 0, 0, 0, 0])
    h = numpy.array([1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1])

    m = numpy.array([1., 1, 1, 1, 1, 1, 1, 1, 1])
    rho = numpy.array([1., 1, 1, 1, 1, 1, 1, 1, 1])

    u = numpy.zeros(9)
    v = numpy.zeros(9)
    w = numpy.zeros(9)

    # use direct keyword arguments instead of an unpacked dict literal;
    # NOTE(review): the x-velocity is stored under the property name
    # 'velx' (unlike generate_sample_dataset_2's plain names) -- confirm
    # this is intentional before renaming.
    parr1 = ParticleArray(name='parr1',
                          x={'data': x}, y={'data': y}, z={'data': z},
                          h={'data': h}, m={'data': m},
                          rho={'data': rho},
                          velx={'data': u}, v={'data': v}, w={'data': w})
    return [parr1]
def generate_sample_dataset_2():
    """
    Generate test data.
    Look at image sph_test_data2.png.
    """
    # eight particles at the corners of a unit cube plus one at the
    # cube's center (0.5, 0.5, 0.5)
    x = numpy.array([0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5])
    y = numpy.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.5])
    z = numpy.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.5])

    # unit smoothing length, mass and density for every particle
    h = numpy.ones(9)
    m = numpy.ones(9)
    rho = numpy.ones(9)
    t = numpy.zeros(9)

    props = {'x': {'data': x},
             'y': {'data': y},
             'z': {'data': z},
             'm': {'data': m},
             'rho': {'data': rho},
             'h': {'data': h},
             't': {'data': t}}
    return [ParticleArray(name='parr1', **props)]
| Python |
"""API module to simplify import of common names from pysph.sph package"""
#Import from calc
from sph_calc import SPHCalc, CLCalc
from sph_func import SPHFunction, SPHFunctionParticle, CSPHFunctionParticle
############################################################################
# IMPORT FUNCTIONS
############################################################################
#Import basic functions
from funcs.basic_funcs import SPHGradient, \
SPHLaplacian, CountNeighbors, SPH as SPHInterpolation,\
VelocityGradient3D, VelocityGradient2D
#Import boundary functions
from funcs.boundary_funcs import MonaghanBoundaryForce, LennardJonesForce, \
BeckerBoundaryForce
#Import density functions
from funcs.density_funcs import SPHRho, SPHDensityRate
#Import Energy functions
from funcs.energy_funcs import EnergyEquation, EnergyEquationAVisc,\
EnergyEquationNoVisc, ArtificialHeat, \
EnergyEquationWithSignalBasedViscosity
#Import viscosity functions
from funcs.viscosity_funcs import MonaghanArtificialViscosity, \
MorrisViscosity, MomentumEquationSignalBasedViscosity
#Import pressure functions
from funcs.pressure_funcs import SPHPressureGradient, MomentumEquation
#Positon Steppers
from funcs.position_funcs import PositionStepping
#Import XSPH functions
from funcs.xsph_funcs import XSPHDensityRate, XSPHCorrection
#Import Equation of state functions
from funcs.eos_funcs import IdealGasEquation, TaitEquation, \
IsothermalEquation, MieGruneisenEquation
#Import external force functions
from funcs.external_force import GravityForce, VectorForce, MoveCircleX,\
MoveCircleY, NBodyForce
#Import ADKE functions
from funcs.adke_funcs import ADKEPilotRho, ADKESmoothingUpdate,\
SPHVelocityDivergence as VelocityDivergence, ADKEConductionCoeffUpdate,\
SetSmoothingLength
# Import stress functions
from funcs.stress_funcs import HookesDeviatoricStressRate2D, \
HookesDeviatoricStressRate3D, MomentumEquationWithStress2D,\
MonaghanArtificialStress, MonaghanArtStressAcc, \
EnergyEquationWithStress2D, VonMisesPlasticity2D
from funcs.stress_funcs import get_K, get_nu, get_G
# Import test funcs
from funcs.test_funcs import ArtificialPotentialForce
# Import GSPH funcs
from funcs.gsph_funcs import GSPHMomentumEquation, GSPHEnergyEquation,\
GSPHPositionStepping
############################################################################
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.