code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os
import shutil
import subprocess
import sys
import tempfile
################################################################################
# Constants.
# ANSI terminal attribute escape sequences.  They default to the empty string
# so colouring degrades gracefully: when the terminal (or platform) has no
# colour support, concatenating them is a no-op.
ptNoAttrs = ''
ptBold = ''
ptNegative = ''
ptBlack = ''
ptRed = ''
ptGreen = ''
ptYellow = ''
ptBlue = ''
ptMagenta = ''
ptCyan = ''
ptWhite = ''
ptBoldRed = ''
ptBoldGreen = ''
ptBoldYellow = ''
ptBoldBlue = ''
ptBoldMagenta= ''
ptBoldCyan = ''
ptBoldWhite = ''
# Do we have colors in win32 ?  The plain Windows console does not understand
# ANSI escapes; colorama translates them, so colours are only enabled there
# when colorama is importable.
ptHaveColors = True
if sys.platform in ['win32']:
    ptHaveColors = False
    try:
        from colorama import init
        init()
        ptHaveColors = True
    except ImportError:
        # Narrowed from a bare 'except': only a missing colorama module
        # should be silently tolerated here.
        print('\nTIP : Installing colorama would give you coloured output.\n')
# Only emit escapes when writing to a real terminal (not a pipe or file).
if ptHaveColors and sys.stdout.isatty():
    ptNoAttrs = '\033[0m'
    ptBold = '\033[1m'
    ptNegative = '\033[7m'
    ptBlack = '\033[30m'
    ptRed = '\033[31m'
    ptGreen = '\033[32m'
    ptYellow = '\033[33m'
    ptBlue = '\033[34m'
    ptMagenta = '\033[35m'
    ptCyan = '\033[36m'
    ptWhite = '\033[37m'
    ptBoldRed = '\033[1;31m'
    ptBoldGreen = '\033[1;32m'
    ptBoldYellow = '\033[1;33m'
    ptBoldBlue = '\033[1;34m'
    ptBoldMagenta= '\033[1;35m'
    ptBoldCyan = '\033[1;36m'
    ptBoldWhite = '\033[1;37m'
################################################################################
# Prints to screen and to log. With color to screen.
def ptPrintLog(DoPrint, LogFile, Color, Message):
    """Write Message to LogFile and, when DoPrint, echo it coloured to stdout.

    Color is an ANSI escape prefix (may be ''); attributes are reset after
    the message so colours never leak into later output.  The log file always
    receives the plain, uncoloured text with a trailing newline.
    """
    if DoPrint:
        # print() instead of the Python-2-only print statement; the
        # single-argument form behaves identically under Python 2 and 3.
        print(Color + Message + ptNoAttrs)
    LogFile.write(Message + '\n')
    return None
################################################################################
# Check for sufficient GCC. Simply returns true if OK.
def ptCheckGCCVersion(BuildEnv,MinVersion):
    """Return True when both $CC and $CXX report a version >= MinVersion.

    Runs '<compiler> -dumpversion' inside the minimal build environment
    BuildEnv['ENV'] (os.environ is saved and restored around the probe).
    Some compilers report only two components (e.g. '4.7' for 4.7.2); a
    missing or non-numeric component counts as a match, as in the original.
    """
    ptPrintLog(True,BuildEnv['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for GCC >= ' + MinVersion + ' ... ')
    # Make sure we work with correct and minimum os.environ. Save previous.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    for Key in BuildEnv['ENV'].keys():
        os.environ[Key] = BuildEnv['ENV'][Key]
    ptCC = BuildEnv['CC']
    ptCXX = BuildEnv['CXX']
    ptCCVersion = os.popen(ptCC + ' -dumpversion').read().rstrip().split('.')
    ptCXXVersion = os.popen(ptCXX + ' -dumpversion').read().rstrip().split('.')
    # Restore env
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    ptMinVersion = MinVersion.split('.')
    # BUGFIX: compare components numerically.  The original compared the
    # strings lexically, so e.g. '10' < '9' and GCC 10 failed a '>= 4.7'
    # check.  It also rejected valid mixed-component combinations such as
    # CC 5.0 / CXX 4.9 against minimum 4.7.
    def _AtLeast(Version):
        # True when Version (list of strings) is >= ptMinVersion.
        for Index in range(len(ptMinVersion)):
            try:
                Have = int(Version[Index])
                Want = int(ptMinVersion[Index])
            except (IndexError, ValueError):
                # Some report 4.7 for 4.7.2: missing components pass.
                return True
            if Have > Want:
                return True
            if Have < Want:
                return False
        return True
    return _AtLeast(ptCCVersion) and _AtLeast(ptCXXVersion)
################################################################################
# Get GCC/CXX version
def ptGetGCCVersion(BuildEnv) :
    """Return [CC version, CXX version] as reported by '-dumpversion'."""
    # Probe inside the minimal build environment; a copy of the real
    # os.environ is kept so it can be restored afterwards.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    os.environ.update(BuildEnv['ENV'])
    ptVersions = [
        os.popen(BuildEnv.WhereIs(ptTool) + ' -dumpversion').read().rstrip()
        for ptTool in (BuildEnv['CC'], BuildEnv['CXX'])]
    # Restore env
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    return ptVersions
################################################################################
# Check hg
def ptCheckHg(Context):
    """Probe for a working 'hg' executable via an SCons configure action."""
    ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldBlue,
               'Checking for hg ... ')
    ptStatus = Context.TryAction('hg')[0]
    Context.Result(ptStatus)
    return ptStatus
################################################################################
# Get AppVersion
def ptGetAppVersion():
    """Build the application version string from the Mercurial working copy.

    Returns '<shortdate> (rev <shorthash>)' for the current revision plus a
    trailing marker character taken from the 'hg identify' output
    (presumably '+' when the working copy is dirty, ' ' otherwise —
    TODO confirm against the hg output format).
    """
    # One 'hg identify' call instead of the two the original made; the
    # character positions sliced below are unchanged.
    ptIdentify = os.popen('hg identify').read()
    ptHgRev = ptIdentify[:11]
    ptChanged = ptIdentify[12]
    ptAppVer = os.popen(
        'hg log --rev ' + ptHgRev +
        ' --template "{date|shortdate} (rev {node|short})"').read()
    return ptAppVer + ptChanged
################################################################################
# Get the package version and flags for packages handled by pkg-config
def ptGetPKGOutput(Context, Name):
    """Return [version, cflags+libs] for package Name via (cross-)pkg-config."""
    # Query inside the minimal build environment; the caller's os.environ is
    # restored before returning.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    os.environ.update(Context.env['ENV'])
    ptPkgConfig = Context.env['PT_CROSS'] + 'pkg-config'
    ptResult = [
        os.popen(ptPkgConfig + ' --modversion ' + Name).read().rstrip(),
        os.popen(ptPkgConfig + ' --cflags --libs ' + Name).read().rstrip()]
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    return ptResult
################################################################################
def ptCheckPKGConfig(Context, MinVersion):
    """Check that (cross-)pkg-config exists and is at least MinVersion."""
    ptPkgConfig = Context.env['PT_CROSS'] + 'pkg-config'
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for ' + ptPkgConfig + ' ... ')
    ptStatus = Context.TryAction(
        ptPkgConfig + ' --atleast-pkgconfig-version=' + MinVersion)[0]
    Context.Result(ptStatus)
    return ptStatus
################################################################################
def ptCheckPKG(Context, Name):
    """Probe whether pkg-config knows package spec Name (may contain '>=')."""
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for ' + Name + '... ')
    ptPkgConfig = Context.env['PT_CROSS'] + 'pkg-config'
    if sys.platform in ['win32']:
        # WIN32 shell escape of >
        ptCommand = (ptPkgConfig + ' --exists %s' % Name).replace(">", "^>")
    else:
        # Quote the spec so the shell does not interpret '>' as redirection.
        ptCommand = ptPkgConfig + ' --exists \'%s\'' % Name
    ptStatus = Context.TryAction(ptCommand)[0]
    Context.Result(ptStatus)
    return ptStatus
################################################################################
def ptCheckLibWithHeader(Context, Lib, Header, Language):
    """Wrapper around SConf.CheckLibWithHeader with logged progress output."""
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for ' + Lib + ' (' + Header + ')... ')
    ptFound = Context.sconf.CheckLibWithHeader(Lib, Header, Language)
    Context.Result(ptFound)
    return ptFound
################################################################################
# custom check on libjpeg version
def ptCheckLibJpeg(Context, MinVersion, MaxVersion):
    """Check that the available libjpeg version lies in [MinVersion, MaxVersion].

    Compiles and runs a tiny C program that prints JPEG_LIB_VERSION; aborts
    the whole configure run (Exit) when that program cannot even be built or
    executed, since that means libjpeg is unusable.
    """
    ptPrintLog(True, Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for libjpeg between ' + str(MinVersion) +
               ' and ' + str(MaxVersion) + '... ')
    ptProgram = """
#include <stdlib.h>
#include <stdio.h>
#define JPEG_LIB_VERSION 0
#include <jpeglib.h>
int main() {
printf("%d",JPEG_LIB_VERSION);
return 0;
}
"""
    def _GiveUp(ptReason):
        # Log the failure plus the offending program text and abort.
        ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed, ptReason)
        ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed, ptProgram)
        ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed, 'Giving up.')
        Exit(1)
    if Context.TryCompile(ptProgram, '.c') == 0:
        _GiveUp('Failing test. Cannot compile test program:')
    ptRunResult = Context.TryRun(ptProgram, '.c')
    if ptRunResult[0] == 0:
        _GiveUp('Failing test. Cannot run test program:')
    ptVersion = int(ptRunResult[1])
    ptInRange = not (ptVersion < MinVersion or ptVersion > MaxVersion)
    if not ptInRange:
        ptPrintLog(True, Context.env['PT_LOGFILE'], ptBoldRed,
                   'libjpeg version : ' + str(ptRunResult[1]) + ' should be between ' +
                   str(MinVersion) + ' and ' + str(MaxVersion))
    Context.Result(ptInRange)
    return ptInRange
################################################################################
def ptCheckQt(Context,MinVersion):
    """Check that qmake (and make) exist and Qt is at least MinVersion.

    qmake is searched under $QT4DIR/bin under its common names; the probes
    run in the minimal build environment Context.env['ENV'].  Aborts (Exit)
    when qmake or make cannot be located at all; otherwise reports the
    version comparison result via Context.Result and the return value.
    """
    ptPrintLog(True,Context.env['PT_LOGFILE'],
               ptBoldBlue,
               'Checking for Qt >= ' + MinVersion + '... ')
    # Make sure we work with correct and minimum os.environ. Save previous.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    for Key in Context.env['ENV'].keys():
        os.environ[Key] = Context.env['ENV'][Key]
    # Locate qmake. Taking QT4DIR into account.
    ptBinDir = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep
    qmake = ''
    for ptName in ('qmake', 'qmake.exe', 'qmake-qt4', 'qmake-qt4.exe'):
        if os.path.exists(ptBinDir + ptName):
            qmake = ptBinDir + ptName
            break
    if not qmake:
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                   'Cannot locate qmake.')
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
        Exit(1)
    # Locate make
    make = Context.env.WhereIs('make')
    if not make:
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                   'Cannot locate make.')
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
        Exit(1)
    # Check version
    ptQtVersion = \
        os.popen(qmake + ' -query QT_VERSION').read().rstrip().split('.')
    ptQtMinVersion = MinVersion.split('.')
    # BUGFIX: restore the caller's environment (the original leaked the
    # minimal build environment into os.environ on every return path).
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    # BUGFIX: compare version components numerically.  The original compared
    # the strings lexically, so e.g. Qt '10.0.0' would sort below '4.7.0'.
    Ret = True
    for ptHave, ptWant in zip(ptQtVersion, ptQtMinVersion):
        try:
            ptHave = int(ptHave)
            ptWant = int(ptWant)
        except ValueError:
            # Non-numeric component (e.g. a suffix): stop comparing.
            break
        if ptHave != ptWant:
            Ret = ptHave > ptWant
            break
    Context.Result(Ret)
    return Ret
################################################################################
# Determine Qt Compile and Link parameters via a qmake run on test program.
def ptGetQtOutput(Context):
    """Determine the Qt version plus compile and link flags.

    Builds a dummy Qt project with qmake+make in a temporary directory and
    parses the command lines that make echoes to extract the flags.
    Returns [QtVersionString, '<compile flags> <link flags>'].
    """
    # Make sure we work with correct and minimum os.environ. Save previous.
    ptSavedEnviron = dict(os.environ)
    os.environ.clear()
    for Key in Context.env['ENV'].keys():
        os.environ[Key] = Context.env['ENV'][Key]
    # Locate qmake. Taking QT4DIR into account.
    qmake_1 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake'
    qmake_2 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake.exe'
    qmake_3 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake-qt4'
    qmake_4 = Context.env['QT4DIR'] + os.sep + 'bin' + os.sep + 'qmake-qt4.exe'
    qmakes = [qmake_1,qmake_2,qmake_3,qmake_4]
    qmake = ''
    for qm in qmakes :
        if os.path.exists(qm) :
            qmake = qm
            break
    if not qmake:
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                   'Cannot locate qmake.')
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
        Exit(1)
    # Locate make
    make = Context.env.WhereIs('make')
    if not make:
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,
                   'Cannot locate make.')
        ptPrintLog(True,Context.env['PT_LOGFILE'],ptBoldRed,'Giving up.')
        Exit(1)
    # Version
    ptQtVersion = os.popen(qmake + ' -query QT_VERSION').read().rstrip()
    # Analyze output of qmake/make combo
    ptCurDir = os.getcwd()
    ptTmpDir = tempfile.mkdtemp()
    # Minimal program: we only care about the flags make uses to build it.
    ptProgram = """
int main() {
return 0;
}
"""
    with open(ptTmpDir + os.sep + 'FooTest.cpp','w') as f :
        f.write(ptProgram)
    # Project file mirroring Photivo's Qt module usage (core/gui/network)
    # and the configured release/debug mode.
    with open(ptTmpDir + os.sep + 'FooTest.pro','w') as f :
        f.write('CONFIG -= DEBUG\n')
        f.write('CONFIG -= RELEASE\n')
        if Context.env['PT_RELEASE'] :
            f.write('CONFIG += RELEASE\n')
        else :
            f.write('CONFIG += DEBUG\n')
        f.write('QT += core\n')
        f.write('QT += gui\n')
        f.write('QT += network\n')
        f.write('SOURCES = FooTest.cpp\n')
    os.chdir(ptTmpDir)
    os.popen(qmake)
    ptMakeOutput = os.popen(make).read().rstrip().split('\n')
    # Analyze output to determine flags.
    ptCompileFlags = ''
    ptLinkFlags = ''
    for Line in ptMakeOutput:
        if 'FooTest.cpp' in Line :
            # Assuming compile step.
            ptLocalEnv = Environment()
            ptParsedFlags = ptLocalEnv.ParseFlags(Line)
            for ptFlag in ptParsedFlags['CPPDEFINES'] :
                ptCompileFlags += ' ' + '-D' + ptFlag
            for ptFlag in ptParsedFlags['CCFLAGS'] :
                # Only keep code-generation related flags (-f*/-m*).
                if ptFlag.startswith('-f') or ptFlag.startswith('-m'):
                    ptCompileFlags += ' ' + ptFlag
            for ptPath in ptParsedFlags['CPPPATH'] :
                # Skip qmake's build-local include dirs.
                if ptPath in ['.','debug','release'] :
                    continue
                ptCompileFlags += ' -I' + os.path.abspath(ptPath).replace("\\","/")
        elif 'FooTest' in Line :
            # Assuming link step.
            ptLocalEnv = Environment()
            ptParsedFlags = ptLocalEnv.ParseFlags(Line)
            for ptFlag in ptParsedFlags['LINKFLAGS'] :
                ptLinkFlags += ' ' + ptFlag
            for ptFlag in ptParsedFlags['CCFLAGS'] :
                if ptFlag.startswith('-f') or ptFlag.startswith('-m'):
                    ptLinkFlags += ' ' + ptFlag
            for ptPath in ptParsedFlags['LIBPATH'] :
                ptLinkFlags += ' -L' + os.path.abspath(ptPath).replace("\\","/")
            for ptLib in ptParsedFlags['LIBS'] :
                try:
                    ptLinkFlags += ' -l' + ptLib
                except TypeError:
                    # foo.exe,foo.o references.
                    pass
    # Back to dir we were.
    os.chdir(ptCurDir)
    # Remove our temp dir.
    shutil.rmtree(ptTmpDir)
    # Restore env
    os.environ.clear()
    os.environ.update(ptSavedEnviron)
    return [ptQtVersion,ptCompileFlags + ' ' + ptLinkFlags]
################################################################################
# Boilerplate to log commands nicely to screen and completely to log file.
def ptPrintCmdLine(s, target, src, env):
    """SCons PRINT_CMD_LINE_FUNC: full command to the log, short line to screen.

    The complete command 's' always goes to the log file; the console gets
    either the full command (when PT_VERBOSE) or a coloured short description
    derived from DSCONS_* markers embedded in the command string.
    """
    # Always to a log file. (and with an extra linefeed to 'see' commands)
    LogFile = env['PT_LOGFILE']
    LogFile.write('\n' + s + '\n')
    ShortText = 'Building object'
    # 'Recognized' commands ?
    if 'DSCONS_CXX' in s:
        ShortText = ptGreen + 'Building CXX object'
    elif 'DSCONS_CC' in s:
        ShortText = ptGreen + 'Building C object'
    elif 'DSCONS_LINK' in s:
        ShortText = ptBoldMagenta + 'Linking'
    elif 'DSCONS_UIC' in s:
        ShortText = ptBoldBlue + 'Generating UIC object'
    elif 'DSCONS_MOC' in s:
        ShortText = ptBoldBlue + 'Generating MOC object'
    elif 'DSCONS_RCC' in s:
        ShortText = ptBoldBlue + 'Generating RCC object'
    elif 'DSCONS_WINDRES' in s:
        ShortText = ptBoldBlue + 'Generating Windows resource'
    elif s.endswith('.lnk'):
        ShortText = ptBoldMagenta + 'Linking'
    elif s.startswith('Creating'):
        ShortText = ptBoldBlue + 'Creating'
    else:
        # Install is a kind of exception. Also it points to a func.
        # We *assume* fallthrough is install. But that's a very shaky one.
        # XXX TODO
        ShortText = ptBoldMagenta + 'Creating'
    if not env['PT_VERBOSE']:
        MyMessage = ShortText + ' ' + ' and '.join([str(x) for x in target])
    else:
        MyMessage = s
    # print() instead of the Python-2-only print statement; the
    # single-argument form behaves identically under Python 2 and 3.
    print(MyMessage + ptNoAttrs)
    return None
################################################################################
# Exit function ensures color reset.
def ptLastCalledAtExit():
    """atexit hook: say goodbye and reset the terminal colour attributes."""
    # print() instead of the Python-2-only print statement; the
    # single-argument form behaves identically under Python 2 and 3.
    print(ptBoldYellow +
          'Bye from the scons build program for Photivo' +
          ptNoAttrs)
    return None
################################################################################
# AtExit that joins the stderr collected in stderr.log into the logfile.
def ptAtExit(LogFile):
    """Append the contents of 'stderr.log' (collected stderr) to LogFile.

    Best effort: any failure (e.g. stderr.log missing, log already closed)
    is silently ignored, since this runs at interpreter shutdown where an
    exception would only obscure the build result.
    """
    try:
        LogFile.write('\nThe stderr output is :\n')
        sys.stderr.flush()  # Make sure the stderr is complete.
        # 'with' guarantees the file is closed even when the write fails
        # (the original leaked the handle on a write error).
        with open('stderr.log', 'r') as StdErrFile:
            LogFile.write(StdErrFile.read())
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still deliberately best-effort.
        pass
    return None
################################################################################
# Basically from Scons wiki : Spawn which echos stdout/stderr from the child.
# ptFoo is mine.
# Basically from Scons wiki : Spawn which echos stdout/stderr from the child.
def ptEchoSpawn( sh, escape, cmd, args, env ):
    """SCons SPAWN replacement that captures and re-echoes child output.

    Joins 'args' into one shell command, runs it with the given environment,
    then forwards the child's stdout and stderr to our own streams.  Returns
    the child's exit code.  'sh', 'escape' and 'cmd' belong to the SCons
    SPAWN signature but are unused here.
    """
    ptCommandLine = ' '.join(args)
    # Convert env values to plain strings (SCons may hand us unicode).
    # .items() instead of the Python-2-only .iteritems().
    asciienv = {}
    for key, value in env.items():
        asciienv[key] = str(value)
    p = subprocess.Popen(
        ptCommandLine,
        env=asciienv,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    (stdout, stderr) = p.communicate()
    # Note: stdout is echoed before stderr, so the child's relative
    # interleaving of the two streams is not preserved.
    sys.stdout.write(stdout)
    sys.stderr.write(stderr)
    return p.returncode
################################################################################
# Local (changed) copy of TempFileMunge
class ptTempFileMunge(object):
    """A callable class. You can set an Environment variable to this,
    then call it with a string argument, then it will perform temporary
    file substitution on it. This is used to circumvent the long command
    line limitation.
    Example usage:
    env["TEMPFILE"] = TempFileMunge
    env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"
    By default, the name of the temporary file used begins with a
    prefix of '@'. This may be configured for other tool chains by
    setting '$TEMPFILEPREFIX'.
    env["TEMPFILEPREFIX"] = '-@' # diab compiler
    env["TEMPFILEPREFIX"] = '-via' # arm tool chain
    """
    def __init__(self, cmd):
        # The command template to (possibly) rewrite into a response file.
        self.cmd = cmd
    def __call__(self, target, source, env, for_signature):
        if for_signature:
            # If we're being called for signature calculation, it's
            # because we're being called by the string expansion in
            # Subst.py, which has the logic to strip any $( $) that
            # may be in the command line we squirreled away. So we
            # just return the raw command line and let the upper
            # string substitution layers do their thing.
            return self.cmd
        # Now we're actually being called because someone is actually
        # going to try to execute the command, so we have to do our
        # own expansion.
        cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
        try:
            maxline = int(env.subst('$MAXLINELENGTH'))
        except ValueError:
            # No (numeric) $MAXLINELENGTH configured: use a safe default.
            maxline = 2048
        # Sum the word lengths; short command lines pass through untouched.
        length = 0
        for c in cmd:
            length += len(c)
        if length <= maxline:
            return self.cmd
        # We do a normpath because mktemp() has what appears to be
        # a bug in Windows that will use a forward slash as a path
        # delimiter. Windows's link mistakes that for a command line
        # switch and barfs.
        #
        # We use the .lnk suffix for the benefit of the Phar Lap
        # linkloc linker, which likes to append an .lnk suffix if
        # none is given.
        (fd, tmp) = tempfile.mkstemp('.lnk', text=True)
        native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
        if env['SHELL'] and env['SHELL'] == 'sh':
            # The sh shell will try to escape the backslashes in the
            # path, so unescape them.
            native_tmp = native_tmp.replace('\\', r'\\\\')
            # In Cygwin, we want to use rm to delete the temporary
            # file, because del does not exist in the sh shell.
            rm = env.Detect('rm') or 'del'
        else:
            # Don't use 'rm' if the shell is not sh, because rm won't
            # work with the Windows shells (cmd.exe or command.com) or
            # Windows path names.
            rm = 'del'
        prefix = env.subst('$TEMPFILEPREFIX')
        if not prefix:
            prefix = '@'
        # JDLA , Another round of escapes for win32, which is in msys in our case.
        if sys.platform in ['win32'] :
            for i,ptCmd in enumerate(cmd) :
                cmd[i] = ptCmd.replace('\\','\\\\')
        # Write everything but the program name into the response file.
        args = list(map(SCons.Subst.quote_spaces, cmd[1:]))
        os.write(fd, " ".join(args) + "\n")
        os.close(fd)
        # XXX Using the SCons.Action.print_actions value directly
        # like this is bogus, but expedient. This class should
        # really be rewritten as an Action that defines the
        # __call__() and strfunction() methods and lets the
        # normal action-execution logic handle whether or not to
        # print/execute the action. The problem, though, is all
        # of that is decided before we execute this method as
        # part of expanding the $TEMPFILE construction variable.
        # Consequently, refactoring this will have to wait until
        # we get more flexible with allowing Actions to exist
        # independently and get strung together arbitrarily like
        # Ant tasks. In the meantime, it's going to be more
        # user-friendly to not let obsession with architectural
        # purity get in the way of just being helpful, so we'll
        # reach into SCons.Action directly.
        #if SCons.Action.print_actions:
        if False :
            print("Using tempfile "+native_tmp+" for command line:\n"+
                  str(cmd[0]) + " " + " ".join(args))
        # Returned command: run the program with '@tmpfile', then delete it.
        return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
################################################################################
| Python |
"""SCons.Tool.qt4
Tool-specific initialization for Qt4.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001-7,2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Additionally in Photivo :
# Changes done in/for Photivo are largely traceable due to the
# use of pt-Prefixes.
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os.path
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
import sys
class ToolQt4Warning(SCons.Warnings.Warning):
    """Base class for warnings emitted by this qt4 tool."""
    pass
class GeneratedMocFileNotIncluded(ToolQt4Warning):
    """A moc file was generated but is not #included by its cpp source."""
    pass
class QtdirNotFound(ToolQt4Warning):
    """The Qt installation directory could not be located."""
    pass
# Register the warning class so it can be enabled/suppressed like the rest.
SCons.Warnings.enableWarningClass(ToolQt4Warning)
# Compatibility shim: provide sorted() on interpreters that predate it.
try:
    sorted
except NameError:
    # Pre-2.4 Python has no sorted() function.
    #
    # The pre-2.4 Python list.sort() method does not support
    # list.sort(key=) nor list.sort(reverse=) keyword arguments, so
    # we must implement the functionality of those keyword arguments
    # by hand instead of passing them to list.sort().
    def sorted(iterable, cmp=None, key=None, reverse=0):
        # Decorate-sort-undecorate when a key function is given.
        if key is not None:
            result = [(key(x), x) for x in iterable]
        else:
            result = iterable[:]
        if cmp is None:
            # Pre-2.3 Python does not support list.sort(None).
            result.sort()
        else:
            result.sort(cmp)
        if key is not None:
            # Strip the decoration, keeping only the original items.
            result = [t1 for t0,t1 in result]
        if reverse:
            result.reverse()
        return result
# Matches the <file> entries inside a .qrc resource file.
qrcinclude_re = re.compile(r'<file[^>]*>([^<]*)</file>', re.M)
def transformToWinePath(path) :
    """Translate a Unix path to its Wine (Windows-style) equivalent."""
    return os.popen('winepath -w "%s"'%path).read().strip().replace('\\','/')
# Header suffixes probed when looking for the companion header of a cpp file.
header_extensions = [".h", ".hxx", ".hpp", ".hh"]
if SCons.Util.case_sensitive_suffixes('.h', '.H'):
    header_extensions.append('.H')
# TODO: The following two lines will work when integrated back to SCons
# TODO: Meanwhile the third line will do the work
#cplusplus = __import__('c++', globals(), locals(), [])
#cxx_suffixes = cplusplus.CXXSuffixes
cxx_suffixes = [".c", ".cxx", ".cpp", ".cc"]
def checkMocIncluded(target, source, env):
    """Warn when a generated moc file is not #included by its cpp source."""
    ptMoc = target[0]
    ptCpp = source[0]
    # looks like cpp.includes is cleared before the build stage :-(
    # not really sure about the path transformations (moc.cwd? cpp.cwd?) :-/
    ptPath = SCons.Defaults.CScan.path_function(env, ptMoc.cwd)
    ptIncludes = SCons.Defaults.CScan(ptCpp, env, ptPath)
    if ptMoc not in ptIncludes:
        SCons.Warnings.warn(
            GeneratedMocFileNotIncluded,
            "Generated moc file '%s' is not included by '%s'" %
            (str(ptMoc), str(ptCpp)))
def find_file(filename, paths, node_factory):
    """Return the first node for filename under paths that exists, else None.

    node_factory is called as node_factory(filename, directory) and the
    resulting node's rexists() decides whether the file is present.
    """
    # 'search_dir' instead of the original 'dir', which shadowed the builtin.
    for search_dir in paths:
        candidate = node_factory(filename, search_dir)
        if candidate.rexists():
            return candidate
    return None
class _Automoc:
"""
Callable class, which works as an emitter for Programs, SharedLibraries and
StaticLibraries.
"""
    def __init__(self, objBuilderName):
        # Name of the object builder (looked up on the env in __call__).
        self.objBuilderName = objBuilderName
        # some regular expressions:
        # Q_OBJECT detection
        self.qo_search = re.compile(r'[^A-Za-z0-9]Q_OBJECT[^A-Za-z0-9]')
        # cxx and c comment 'eater'
        self.ccomment = re.compile(r'/\*(.*?)\*/',re.S)
        self.cxxcomment = re.compile(r'//.*$',re.M)
        # we also allow Q_OBJECT in a literal string
        self.literal_qobject = re.compile(r'"[^\n]*Q_OBJECT[^\n]*"')
def create_automoc_options(self, env):
"""
Create a dictionary with variables related to Automocing,
based on the current environment.
Is executed once in the __call__ routine.
"""
moc_options = {'auto_scan' : True,
'auto_scan_strategy' : 0,
'gobble_comments' : 0,
'debug' : 0,
'auto_cpppath' : True,
'cpppaths' : []}
try:
if int(env.subst('$QT4_AUTOSCAN')) == 0:
moc_options['auto_scan'] = False
except ValueError:
pass
try:
moc_options['auto_scan_strategy'] = int(env.subst('$QT4_AUTOSCAN_STRATEGY'))
except ValueError:
pass
try:
moc_options['gobble_comments'] = int(env.subst('$QT4_GOBBLECOMMENTS'))
except ValueError:
pass
try:
moc_options['debug'] = int(env.subst('$QT4_DEBUG'))
except ValueError:
pass
try:
if int(env.subst('$QT4_AUTOMOC_SCANCPPPATH')) == 0:
moc_options['auto_cpppath'] = False
except ValueError:
pass
if moc_options['auto_cpppath']:
paths = env.get('QT4_AUTOMOC_CPPPATH', [])
if not paths:
paths = env.get('CPPPATH', [])
moc_options['cpppaths'].extend(paths)
return moc_options
    def __automoc_strategy_simple(self, env, moc_options,
                                  cpp, cpp_contents, out_sources):
        """
        Default Automoc strategy (Q_OBJECT driven): detect a header file
        (alongside the current cpp/cxx) that contains a Q_OBJECT
        macro...and MOC it.
        If a Q_OBJECT macro is also found in the cpp/cxx itself,
        it gets MOCed too.
        """
        h=None
        for h_ext in header_extensions:
            # try to find the header file in the corresponding source
            # directory
            hname = self.splitext(cpp.name)[0] + h_ext
            h = find_file(hname, [cpp.get_dir()]+moc_options['cpppaths'], env.File)
            if h:
                if moc_options['debug']:
                    print "scons: qt4: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
                h_contents = h.get_contents()
                if moc_options['gobble_comments']:
                    # Strip C and C++ comments so a commented-out Q_OBJECT
                    # does not trigger a moc run.
                    h_contents = self.ccomment.sub('', h_contents)
                    h_contents = self.cxxcomment.sub('', h_contents)
                # Neutralize Q_OBJECT occurrences inside string literals.
                h_contents = self.literal_qobject.sub('""', h_contents)
                break
        if not h and moc_options['debug']:
            print "scons: qt4: no header for '%s'." % (str(cpp))
        if h and self.qo_search.search(h_contents):
            # h file with the Q_OBJECT macro found -> add moc_cpp
            moc_cpp = env.Moc4(h)
            moc_o = self.objBuilder(moc_cpp)
            out_sources.extend(moc_o)
            if moc_options['debug']:
                print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp))
        if cpp and self.qo_search.search(cpp_contents):
            # cpp file with Q_OBJECT macro found -> add moc
            # (to be included in cpp)
            moc = env.Moc4(cpp)
            env.Ignore(moc, moc)
            if moc_options['debug']:
                print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
    def __automoc_strategy_include_driven(self, env, moc_options,
                                          cpp, cpp_contents, out_sources):
        """
        Automoc strategy #1 (include driven): searches for "include"
        statements of MOCed files in the current cpp/cxx file.
        This strategy tries to add support for the compilation
        of the qtsolutions...
        Falls back to the simple Q_OBJECT-driven strategy when no such
        include is found.
        """
        if self.splitext(str(cpp))[1] in cxx_suffixes:
            added = False
            # Expected file names of a moc'ed header / moc'ed cxx for this
            # source, built from the configured prefix/suffix variables.
            h_moc = "%s%s%s" % (env.subst('$QT4_XMOCHPREFIX'),
                                self.splitext(cpp.name)[0],
                                env.subst('$QT4_XMOCHSUFFIX'))
            cxx_moc = "%s%s%s" % (env.subst('$QT4_XMOCCXXPREFIX'),
                                  self.splitext(cpp.name)[0],
                                  env.subst('$QT4_XMOCCXXSUFFIX'))
            inc_h_moc = r'#include\s+"%s"' % h_moc
            inc_cxx_moc = r'#include\s+"%s"' % cxx_moc
            # Search for special includes in qtsolutions style
            if cpp and re.search(inc_h_moc, cpp_contents):
                # cpp file with #include directive for a MOCed header found -> add moc
                # Try to find header file
                h=None
                hname=""
                for h_ext in header_extensions:
                    # Try to find the header file in the
                    # corresponding source directory
                    hname = self.splitext(cpp.name)[0] + h_ext
                    h = find_file(hname, [cpp.get_dir()]+moc_options['cpppaths'], env.File)
                    if h:
                        if moc_options['debug']:
                            print "scons: qt4: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
                        h_contents = h.get_contents()
                        if moc_options['gobble_comments']:
                            # Strip comments so commented-out Q_OBJECT is ignored.
                            h_contents = self.ccomment.sub('', h_contents)
                            h_contents = self.cxxcomment.sub('', h_contents)
                        # Neutralize Q_OBJECT inside string literals.
                        h_contents = self.literal_qobject.sub('""', h_contents)
                        break
                if not h and moc_options['debug']:
                    print "scons: qt4: no header for '%s'." % (str(cpp))
                if h and self.qo_search.search(h_contents):
                    # h file with the Q_OBJECT macro found -> add moc_cpp
                    moc_cpp = env.XMoc4(h)
                    env.Ignore(moc_cpp, moc_cpp)
                    added = True
                    # Removing file from list of sources, because it is not to be
                    # compiled but simply included by the cpp/cxx file.
                    for idx, s in enumerate(out_sources):
                        if hasattr(s, "sources") and len(s.sources) > 0:
                            if str(s.sources[0]) == h_moc:
                                out_sources.pop(idx)
                                break
                    if moc_options['debug']:
                        print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(h_moc))
                else:
                    if moc_options['debug']:
                        print "scons: qt4: found no Q_OBJECT macro in '%s', but a moc'ed version '%s' gets included in '%s'" % (str(h), inc_h_moc, cpp.name)
            if cpp and re.search(inc_cxx_moc, cpp_contents):
                # cpp file with #include directive for a MOCed cxx file found -> add moc
                if self.qo_search.search(cpp_contents):
                    moc = env.XMoc4(target=cxx_moc, source=cpp)
                    env.Ignore(moc, moc)
                    added = True
                    if moc_options['debug']:
                        print "scons: qt4: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
                else:
                    if moc_options['debug']:
                        print "scons: qt4: found no Q_OBJECT macro in '%s', although a moc'ed version '%s' of itself gets included" % (cpp.name, inc_cxx_moc)
            if not added:
                # Fallback to default Automoc strategy (Q_OBJECT driven)
                self.__automoc_strategy_simple(env, moc_options, cpp,
                                               cpp_contents, out_sources)
def __call__(self, target, source, env):
    """
    Smart autoscan function. Gets the list of objects for the Program
    or Lib. Adds objects and builders for the special qt4 files.

    Returns (target, sorted out_sources) where out_sources is the input
    source list plus/minus the moc-generated entries.
    """
    moc_options = self.create_automoc_options(env)
    # some shortcuts used in the scanner
    self.splitext = SCons.Util.splitext
    self.objBuilder = getattr(env, self.objBuilderName)
    # The following is kind of hacky to get builders working properly (FIXME)
    # The builders' env is temporarily swapped for the calling env so the
    # nested builder calls below pick up the right construction variables.
    objBuilderEnv = self.objBuilder.env
    self.objBuilder.env = env
    mocBuilderEnv = env.Moc4.env
    env.Moc4.env = env
    xMocBuilderEnv = env.XMoc4.env
    env.XMoc4.env = env
    # make a deep copy for the result; MocH objects will be appended
    out_sources = source[:]
    for obj in source:
        # QT4_AUTOSCAN == 0 disables the whole scan.
        if not moc_options['auto_scan']:
            break
        if isinstance(obj,basestring):  # big kludge!
            print "scons: qt4: '%s' MAYBE USING AN OLD SCONS VERSION AND NOT CONVERTED TO 'File'. Discarded." % str(obj)
            continue
        if not obj.has_builder():
            # binary obj file provided
            if moc_options['debug']:
                print "scons: qt4: '%s' seems to be a binary. Discarded." % str(obj)
            continue
        cpp = obj.sources[0]
        if not self.splitext(str(cpp))[1] in cxx_suffixes:
            if moc_options['debug']:
                print "scons: qt4: '%s' is no cxx file. Discarded." % str(cpp)
            # c or fortran source
            continue
        try:
            cpp_contents = cpp.get_contents()
            # Optionally strip C and C++ comments, then blank out quoted
            # "Q_OBJECT" literals so they don't trigger false positives.
            if moc_options['gobble_comments']:
                cpp_contents = self.ccomment.sub('', cpp_contents)
                cpp_contents = self.cxxcomment.sub('', cpp_contents)
            cpp_contents = self.literal_qobject.sub('""', cpp_contents)
        except: continue # may be an still not generated source
        if moc_options['auto_scan_strategy'] == 0:
            # Default Automoc strategy (Q_OBJECT driven)
            self.__automoc_strategy_simple(env, moc_options,
                cpp, cpp_contents, out_sources)
        else:
            # Automoc strategy #1 (include driven)
            self.__automoc_strategy_include_driven(env, moc_options,
                cpp, cpp_contents, out_sources)
    # restore the original env attributes (FIXME)
    self.objBuilder.env = objBuilderEnv
    env.Moc4.env = mocBuilderEnv
    env.XMoc4.env = xMocBuilderEnv
    # We return the set of source entries as sorted sequence, else
    # the order might accidentally change from one build to another
    # and trigger unwanted rebuilds. For proper sorting, a key function
    # has to be specified...FS.Entry (and Base nodes in general) do not
    # provide a __cmp__, for performance reasons.
    return (target, sorted(set(out_sources), key=lambda entry : str(entry)))
# Emitter instances attached to the library/program builders below: they
# scan the sources of shared/static objects for moc'able files.
AutomocShared = _Automoc('SharedObject')
AutomocStatic = _Automoc('StaticObject')
def _detect(env):
    """Not really safe, but fast method to detect the Qt4 library.

    Search order: the QT4DIR/QTDIR construction variables, the same
    names in os.environ, and finally the location of a moc executable
    on the PATH.  Raises SCons.Errors.StopError when nothing is found.
    """
    # TODO: check output of "moc -v" for correct version >= 4.0.0
    for key in ('QT4DIR', 'QTDIR'):
        try:
            return env[key]
        except KeyError:
            pass
    for key in ('QT4DIR', 'QTDIR'):
        try:
            return os.environ[key]
        except KeyError:
            pass
    moc = env.WhereIs('moc-qt4') or env.WhereIs('moc4') or env.WhereIs('moc')
    if moc:
        # moc lives in <QT4DIR>/bin, so strip two path components.
        QT4DIR = os.path.dirname(os.path.dirname(moc))
        SCons.Warnings.warn(
            QtdirNotFound,
            "QT4DIR variable is not defined, using moc executable as a hint (QT4DIR=%s)" % QT4DIR)
        return QT4DIR
    # fix: the original had an unreachable 'return None' after this raise
    raise SCons.Errors.StopError(
        QtdirNotFound,
        "Could not detect Qt 4 installation")
def __scanResources(node, env, path, arg):
    """Scanner for .qrc files.

    Returns the file names referenced by the resource file, expressed
    relative to the qrc file's own directory (directory entries are
    expanded recursively into the files they contain).
    """
    def expand_dir(basepath, relpath):
        # Collect every file below basepath/relpath, keeping the names
        # relative to basepath (i.e. relative to the qrc file).
        collected = []
        for entry in os.listdir(os.path.join(basepath, relpath)):
            entry_rel = os.path.join(relpath, entry)
            if os.path.isdir(os.path.join(basepath, entry_rel)):
                collected.extend(expand_dir(basepath, entry_rel))
            else:
                collected.append(entry_rel)
        return collected
    contents = node.get_contents()
    includes = qrcinclude_re.findall(contents)
    qrcpath = os.path.dirname(node.path)
    # Directory entries need to be replaced by their files, recursively.
    directory_entries = [entry for entry in includes
                         if os.path.isdir(os.path.join(qrcpath, entry))]
    for entry in directory_entries:
        includes.remove(entry)
        includes += expand_dir(qrcpath, entry)
    return includes
#
# Scanners
#
# Scanner attached to .qrc sources so SCons records the files referenced
# by the resource file as implicit dependencies of the generated source.
__qrcscanner = SCons.Scanner.Scanner(name = 'qrcfile',
    function = __scanResources,
    argument = None,
    skeys = ['.qrc'])
#
# Emitters
#
def __qrc_path(head, prefix, tail, suffix):
    """Assemble the target path for a compiled qrc file.

    With a directory part the file name is prefix+tail+suffix inside it;
    without a tail the head itself is treated as the base name.
    """
    if head and tail:
        return os.path.join(head, "%s%s%s" % (prefix, tail, suffix))
    if head:
        return "%s%s%s" % (prefix, head, suffix)
    return "%s%s%s" % (prefix, tail, suffix)
def __qrc_emitter(target, source, env):
    """Emitter deriving the qrc_<name> C++ target from a .qrc source."""
    base = os.path.splitext(SCons.Util.to_String(source[0]))[0]
    head, tail = None, base
    if base:
        head, tail = os.path.split(base)
    t = __qrc_path(head, env.subst('$QT4_QRCCXXPREFIX'),
                   tail, env.subst('$QT4_QRCCXXSUFFIX'))
    return t, source
#
# Action generators
#
def __moc_generator_from_h(source, target, env, for_signature):
    """Build the moc command line for a header input.

    $QT4_MOCDEFINES is included only when QT4_CPPDEFINES_PASSTOMOC
    substitutes to the integer 1.
    """
    try:
        pass_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        pass_defines = False
    if pass_defines:
        return '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
def __moc_generator_from_cxx(source, target, env, for_signature):
    """Build the moc command line for a cxx input, followed by a check
    that the moc output actually gets #included by its source."""
    try:
        pass_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        pass_defines = False
    if pass_defines:
        command = '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    else:
        command = '$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return [command, SCons.Action.Action(checkMocIncluded,None)]
def __mocx_generator_from_h(source, target, env, for_signature):
    """Extended-automoc variant of __moc_generator_from_h (same command
    line; kept separate so the XMoc4 builder can evolve independently)."""
    try:
        pass_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        pass_defines = False
    if pass_defines:
        return '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
def __mocx_generator_from_cxx(source, target, env, for_signature):
    """Extended-automoc variant of __moc_generator_from_cxx."""
    try:
        pass_defines = int(env.subst('$QT4_CPPDEFINES_PASSTOMOC')) == 1
    except ValueError:
        pass_defines = False
    if pass_defines:
        command = '$QT4_MOC $QT4_MOCDEFINES $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    else:
        command = '$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE'
    return [command, SCons.Action.Action(checkMocIncluded,None)]
def __qrc_generator(source, target, env, for_signature):
    """Build the rcc command line.

    When $QT4_QRCFLAGS does not already carry a '-name' option, a
    resource name is derived from the source file's basename (with the
    .qrc suffix stripped) and passed explicitly.
    """
    name_defined = False
    try:
        if env.subst('$QT4_QRCFLAGS').find('-name') >= 0:
            name_defined = True
    except ValueError:
        pass
    if name_defined:
        return '$QT4_RCC $QT4_QRCFLAGS $SOURCE -o $TARGET'
    src = str(source[0])
    head, tail = os.path.split(src)
    if tail:
        src = tail
    qrc_suffix = env.subst('$QT4_QRCSUFFIX')  # fix: was computed twice
    if src.endswith(qrc_suffix):
        qrc_stem = src[:-len(qrc_suffix)]
    else:
        qrc_stem = src
    return '$QT4_RCC $QT4_QRCFLAGS -name %s $SOURCE -o $TARGET' % qrc_stem
#
# Builders
#
# Translation sources: lupdate generates/updates .ts files.
__ts_builder = SCons.Builder.Builder(
    action = SCons.Action.Action('$QT4_LUPDATECOM','$QT4_LUPDATECOMSTR'),
    suffix = '.ts',
    source_factory = SCons.Node.FS.Entry)
# Compiled translations: lrelease turns .ts into binary .qm files.
__qm_builder = SCons.Builder.Builder(
    action = SCons.Action.Action('$QT4_LRELEASECOM','$QT4_LRELEASECOMSTR'),
    src_suffix = '.ts',
    suffix = '.qm')
# Resources: rcc compiles .qrc into a qrc_* C++ source; the scanner makes
# the referenced files implicit dependencies.
# NOTE(review): "QT4_QRCCOMSTR" below lacks the leading '$' used by every
# other cmdstr in this file -- confirm whether that is intentional.
__qrc_builder = SCons.Builder.Builder(
    action = SCons.Action.CommandGeneratorAction(__qrc_generator,
        {"cmdstr":"QT4_QRCCOMSTR"}),
    source_scanner = __qrcscanner,
    src_suffix = '$QT4_QRCSUFFIX',
    suffix = '$QT4_QRCCXXSUFFIX',
    prefix = '$QT4_QRCCXXPREFIX',
    single_source = 1)
# Builders backing the Explicit* pseudo-builders below.
__ex_moc_builder = SCons.Builder.Builder(
    action = SCons.Action.CommandGeneratorAction(__moc_generator_from_h,
        {"cmdstr":"$QT4_MOCFROMHCOMSTR"}))
__ex_uic_builder = SCons.Builder.Builder(
    action = SCons.Action.Action('$QT4_UICCOM', '$QT4_UICCOMSTR'),
    src_suffix = '.ui')
#
# Wrappers (pseudo-Builders)
#
def Ts4(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder wrapper around the LUPDATE executable of Qt4.

    lupdate [options] [source-file|path]... -ts ts-files
    """
    # Normalise target and source to lists; sources default to the targets.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target[:]
    if not SCons.Util.is_List(source):
        source = [source]
    # Honour QT4_CLEAN_TS: only when it substitutes to the integer 1 may
    # 'scons -c' delete the .ts files.
    try:
        clean_ts = int(env.subst('$QT4_CLEAN_TS')) == 1
    except ValueError:
        clean_ts = False
    result = []
    for tgt in target:
        node = __ts_builder.__call__(env, tgt, source, **kw)
        if not clean_ts:
            # Prevent deletion of the .ts file, unless explicitly specified
            env.NoClean(node)
        # Keep the target "precious" so it is not removed prior to a rebuild.
        env.Precious(node)
        result.extend(node)
    return result
def Qm4(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder wrapper around the LRELEASE executable of Qt4.

    lrelease [options] ts-files [-qm qm-file]
    """
    # Normalise target and source to lists; sources default to the targets.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target[:]
    if not SCons.Util.is_List(source):
        source = [source]
    result = []
    for tgt in target:
        result += __qm_builder.__call__(env, tgt, source, **kw)
    return result
def Qrc4(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder wrapper around the RCC executable of Qt4.

    rcc [options] qrc-files -o out-file
    """
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target[:]
    if not SCons.Util.is_List(source):
        source = [source]
    # Unlike Ts4/Qm4, each target is paired with exactly one qrc source.
    result = []
    for tgt, src in zip(target, source):
        result += __qrc_builder.__call__(env, tgt, src, **kw)
    return result
def ExplicitMoc4(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the MOC executable of Qt4.

    moc [options] <header-file>
    """
    targets = target if SCons.Util.is_List(target) else [target]
    sources = source if SCons.Util.is_List(source) else [source]
    result = []
    for tgt in targets:
        # Is it a header or a cxx file?
        result.extend(__ex_moc_builder.__call__(env, tgt, sources, **kw))
    return result
def ExplicitUic4(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the UIC executable of Qt4.

    uic [options] <uifile>
    """
    targets = target if SCons.Util.is_List(target) else [target]
    sources = source if SCons.Util.is_List(source) else [source]
    result = []
    for tgt in targets:
        result.extend(__ex_uic_builder.__call__(env, tgt, sources, **kw))
    return result
def generate(env):
    """Add Builders and construction variables for qt4 to an Environment."""
    def locateQt4Command(env, command, qtdir) :
        # Take cross into account.
        ptCrossCommand = env['PT_CROSS'] + command
        # Decorations (linux and msys/mingw/cygwin accept. env in linux)
        # We issue something like 'ID=DSCONS_UIC /path/to/uic'
        # This is just for recognizing at command print time.
        ptDecoration = ''
        if command == 'moc':
            ptDecoration = 'ID=DSCONS_MOC'
        elif command == 'uic' :
            ptDecoration = 'ID=DSCONS_UIC'
        elif command == 'rcc' :
            ptDecoration = 'ID=DSCONS_RCC'
        elif command == 'lupdate' :
            ptDecoration = 'ID=DSCONS_LUPDATE'
        elif command == 'lrelease' :
            ptDecoration = 'ID=DSCONS_LRELEASE'
        # cmd.exe cannot prefix a command with an environment assignment.
        if sys.platform.startswith('win') :
            ptDecoration = ''
        suffixes = [
            '-qt4',
            '-qt4.exe',
            '4',
            '4.exe',
            '',
            '.exe',
        ]
        triedPaths = []
        # First probe <qtdir>/bin for every suffixed variant ...
        for suffix in suffixes :
            fullpath = os.path.join(qtdir,'bin',ptCrossCommand + suffix)
            if os.access(fullpath, os.X_OK) :
                return ptDecoration + ' ' + fullpath
            triedPaths.append(fullpath)
        # ... then fall back to searching the PATH.
        fullpath = env.Detect([ptCrossCommand+'-qt4',
                               ptCrossCommand+'4',
                               ptCrossCommand])
        if not (fullpath is None) : return ptDecoration + ' ' + fullpath
        # Translation tools are optional; everything else is fatal.
        if command in ('lupdate','lrelease'):
            print 'Qt4 could not locate \'' + \
                ptCrossCommand + '\' ' + \
                '(This might be acceptable)'
            return None
        raise Exception("Qt4 command '" + command + "' not found. Tried: " + ', '.join(triedPaths))
    CLVar = SCons.Util.CLVar
    Action = SCons.Action.Action
    Builder = SCons.Builder.Builder
    env['QT4DIR'] = _detect(env)
    # TODO: 'Replace' should be 'SetDefault'
    # env.SetDefault(
    env.Replace(
        QT4DIR = _detect(env),
        QT4_BINPATH = os.path.join('$QT4DIR', 'bin'),
        QT4_LIBPATH = os.path.join('$QT4DIR', 'lib'),
        # TODO: This is not reliable to QT4DIR value changes but needed in order to support '-qt4' variants
        QT4_MOC = locateQt4Command(env,'moc', env['QT4DIR']),
        QT4_UIC = locateQt4Command(env,'uic', env['QT4DIR']),
        QT4_RCC = locateQt4Command(env,'rcc', env['QT4DIR']),
        QT4_LUPDATE = locateQt4Command(env,'lupdate', env['QT4DIR']),
        QT4_LRELEASE = locateQt4Command(env,'lrelease', env['QT4DIR']),
        QT4_AUTOSCAN = 1, # Should the qt4 tool try to figure out, which sources are to be moc'ed?
        QT4_AUTOSCAN_STRATEGY = 0, # While scanning for files to moc, should we search for includes in qtsolutions style?
        QT4_GOBBLECOMMENTS = 0, # If set to 1, comments are removed before scanning cxx/h files.
        QT4_CPPDEFINES_PASSTOMOC = 1, # If set to 1, all CPPDEFINES get passed to the moc executable.
        QT4_CLEAN_TS = 0, # If set to 1, translation files (.ts) get cleaned on 'scons -c'
        QT4_AUTOMOC_SCANCPPPATH = 1, # If set to 1, the CPPPATHs (or QT4_AUTOMOC_CPPPATH) get scanned for moc'able files
        QT4_AUTOMOC_CPPPATH = [], # Alternative paths that get scanned for moc files
        # Some Qt4 specific flags. I don't expect someone wants to
        # manipulate those ...
        QT4_UICFLAGS = CLVar(''),
        QT4_MOCFROMHFLAGS = CLVar(''),
        QT4_MOCFROMCXXFLAGS = CLVar('-i'),
        QT4_QRCFLAGS = '',
        QT4_LUPDATEFLAGS = '',
        QT4_LRELEASEFLAGS = '',
        # suffixes/prefixes for the headers / sources to generate
        QT4_UISUFFIX = '.ui',
        QT4_UICDECLPREFIX = 'ui_',
        QT4_UICDECLSUFFIX = '.h',
        QT4_MOCINCPREFIX = '-I',
        QT4_MOCHPREFIX = 'moc_',
        QT4_MOCHSUFFIX = '$CXXFILESUFFIX',
        QT4_MOCCXXPREFIX = '',
        QT4_MOCCXXSUFFIX = '.moc',
        QT4_QRCSUFFIX = '.qrc',
        QT4_QRCCXXSUFFIX = '$CXXFILESUFFIX',
        QT4_QRCCXXPREFIX = 'qrc_',
        QT4_MOCDEFPREFIX = '-D',
        QT4_MOCDEFSUFFIX = '',
        QT4_MOCDEFINES = '${_defines(QT4_MOCDEFPREFIX, CPPDEFINES, QT4_MOCDEFSUFFIX, __env__)}',
        QT4_MOCCPPPATH = [],
        QT4_MOCINCFLAGS = '$( ${_concat(QT4_MOCINCPREFIX, QT4_MOCCPPPATH, INCSUFFIX, __env__, RDirs)} $)',
        # Commands for the qt4 support ...
        QT4_UICCOM = '$QT4_UIC $QT4_UICFLAGS -o $TARGET $SOURCE',
        QT4_LUPDATECOM = '$QT4_LUPDATE $QT4_LUPDATEFLAGS $SOURCES -ts $TARGET',
        QT4_LRELEASECOM = '$QT4_LRELEASE $QT4_LRELEASEFLAGS -qm $TARGET $SOURCES',
        # Specialized variables for the Extended Automoc support
        # (Strategy #1 for qtsolutions)
        QT4_XMOCHPREFIX = 'moc_',
        QT4_XMOCHSUFFIX = '.cpp',
        QT4_XMOCCXXPREFIX = '',
        QT4_XMOCCXXSUFFIX = '.moc',
        )
    # Register the pseudo-builders (AddMethod exists since SCons 0.98).
    try:
        env.AddMethod(Ts4, "Ts4")
        env.AddMethod(Qm4, "Qm4")
        env.AddMethod(Qrc4, "Qrc4")
        env.AddMethod(ExplicitMoc4, "ExplicitMoc4")
        env.AddMethod(ExplicitUic4, "ExplicitUic4")
    except AttributeError:
        # Looks like we use a pre-0.98 version of SCons...
        from SCons.Script.SConscript import SConsEnvironment
        SConsEnvironment.Ts4 = Ts4
        SConsEnvironment.Qm4 = Qm4
        SConsEnvironment.Qrc4 = Qrc4
        SConsEnvironment.ExplicitMoc4 = ExplicitMoc4
        SConsEnvironment.ExplicitUic4 = ExplicitUic4
    # Interface builder
    uic4builder = Builder(
        action = SCons.Action.Action('$QT4_UICCOM', '$QT4_UICCOMSTR'),
        src_suffix='$QT4_UISUFFIX',
        suffix='$QT4_UICDECLSUFFIX',
        prefix='$QT4_UICDECLPREFIX',
        single_source = True
        #TODO: Consider the uiscanner on new scons version
        )
    env['BUILDERS']['Uic4'] = uic4builder
    # Metaobject builder: one action/prefix/suffix per registered extension.
    mocBld = Builder(action={}, prefix={}, suffix={})
    for h in header_extensions:
        act = SCons.Action.CommandGeneratorAction(__moc_generator_from_h,
            {"cmdstr":"$QT4_MOCFROMHCOMSTR"})
        mocBld.add_action(h, act)
        mocBld.prefix[h] = '$QT4_MOCHPREFIX'
        mocBld.suffix[h] = '$QT4_MOCHSUFFIX'
    for cxx in cxx_suffixes:
        act = SCons.Action.CommandGeneratorAction(__moc_generator_from_cxx,
            {"cmdstr":"$QT4_MOCFROMCXXCOMSTR"})
        mocBld.add_action(cxx, act)
        mocBld.prefix[cxx] = '$QT4_MOCCXXPREFIX'
        mocBld.suffix[cxx] = '$QT4_MOCCXXSUFFIX'
    env['BUILDERS']['Moc4'] = mocBld
    # Metaobject builder for the extended auto scan feature
    # (Strategy #1 for qtsolutions)
    xMocBld = Builder(action={}, prefix={}, suffix={})
    for h in header_extensions:
        act = SCons.Action.CommandGeneratorAction(__mocx_generator_from_h,
            {"cmdstr":"$QT4_MOCFROMHCOMSTR"})
        xMocBld.add_action(h, act)
        xMocBld.prefix[h] = '$QT4_XMOCHPREFIX'
        xMocBld.suffix[h] = '$QT4_XMOCHSUFFIX'
    for cxx in cxx_suffixes:
        act = SCons.Action.CommandGeneratorAction(__mocx_generator_from_cxx,
            {"cmdstr":"$QT4_MOCFROMCXXCOMSTR"})
        xMocBld.add_action(cxx, act)
        xMocBld.prefix[cxx] = '$QT4_XMOCCXXPREFIX'
        xMocBld.suffix[cxx] = '$QT4_XMOCCXXSUFFIX'
    env['BUILDERS']['XMoc4'] = xMocBld
    # Add the Qrc4 action to the CXX file builder (registers the
    # *.qrc extension with the Environment)
    cfile_builder, cxxfile_builder = SCons.Tool.createCFileBuilders(env)
    qrc_act = SCons.Action.CommandGeneratorAction(__qrc_generator,
        {"cmdstr":"$QT4_QRCCOMSTR"})
    cxxfile_builder.add_action('$QT4_QRCSUFFIX', qrc_act)
    cxxfile_builder.add_emitter('$QT4_QRCSUFFIX', __qrc_emitter)
    # We use the emitters of Program / StaticLibrary / SharedLibrary
    # to scan for moc'able files
    # We can't refer to the builders directly, we have to fetch them
    # as Environment attributes because that sets them up to be called
    # correctly later by our emitter.
    env.AppendUnique(PROGEMITTER =[AutomocStatic],
                     SHLIBEMITTER=[AutomocShared],
                     LIBEMITTER  =[AutomocStatic],
                    )
    # TODO: Does dbusxml2cpp need an adapter
    try:
        env.AddMethod(enable_modules, "EnableQt4Modules")
    except AttributeError:
        # Looks like we use a pre-0.98 version of SCons...
        from SCons.Script.SConscript import SConsEnvironment
        SConsEnvironment.EnableQt4Modules = enable_modules
def enable_modules(self, modules, debug=False) :
    """Enable the given Qt4 *modules* on this Environment.

    Appends the matching CPPDEFINES, LIBS, LIBPATH and CPPPATH entries.
    With debug=True the debug variants of the libraries are selected.
    Raises Exception for unknown module names.
    """
    import sys
    validModules = [
        'QtCore',
        'QtGui',
        'QtOpenGL',
        'Qt3Support',
        'QtAssistant', # deprecated
        'QtAssistantClient',
        'QtScript',
        'QtDBus',
        'QtSql',
        'QtSvg',
        # The next modules have not been tested yet so, please
        # maybe they require additional work on non Linux platforms
        'QtNetwork',
        'QtTest',
        'QtXml',
        'QtXmlPatterns',
        'QtUiTools',
        'QtDesigner',
        'QtDesignerComponents',
        'QtWebKit',
        'QtHelp',
        'QtScript',
        'QtScriptTools',
        'QtMultimedia',
    ]
    pclessModules = [
        # in qt <= 4.3 designer and designerComponents are pcless, on qt4.4 they are not, so removed.
        # 'QtDesigner',
        # 'QtDesignerComponents',
    ]
    staticModules = [
        'QtUiTools',
    ]
    # Reject unknown module names up front.
    invalidModules=[]
    for module in modules:
        if module not in validModules :
            invalidModules.append(module)
    if invalidModules :
        raise Exception("Modules %s are not Qt4 modules. Valid Qt4 modules are: %s"% (
            str(invalidModules),str(validModules)))
    moduleDefines = {
        'QtScript' : ['QT_SCRIPT_LIB'],
        'QtSvg' : ['QT_SVG_LIB'],
        'Qt3Support' : ['QT_QT3SUPPORT_LIB','QT3_SUPPORT'],
        'QtSql' : ['QT_SQL_LIB'],
        'QtXml' : ['QT_XML_LIB'],
        'QtOpenGL' : ['QT_OPENGL_LIB'],
        'QtGui' : ['QT_GUI_LIB'],
        'QtNetwork' : ['QT_NETWORK_LIB'],
        'QtCore' : ['QT_CORE_LIB'],
    }
    # Modules without an entry in moduleDefines simply add no define.
    for module in modules :
        try : self.AppendUnique(CPPDEFINES=moduleDefines[module])
        except: pass
    debugSuffix = ''
    if sys.platform in ["darwin", "linux2", "win32"] :
        if debug :
            if sys.platform in ["win32"] :
                debugSuffix = 'd'
            else :
                debugSuffix = '_debug'
        # NOTE(review): only the pc-less modules are handled in this loop,
        # and pclessModules is currently empty -- confirm this is intended.
        for module in modules :
            if module not in pclessModules : continue
            self.AppendUnique(LIBS=[module+debugSuffix])
            self.AppendUnique(LIBPATH=[os.path.join("$QT4DIR","lib")])
            self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4")])
            self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4",module)])
        pcmodules = [module+debugSuffix for module in modules if module not in pclessModules ]
        # NOTE(review): with a debug suffix these membership tests cannot
        # match ('QtDBusd' != 'QtDBus') -- confirm desired behaviour.
        if 'QtDBus' in pcmodules:
            self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4","QtDBus")])
        if "QtAssistant" in pcmodules:
            self.AppendUnique(CPPPATH=[os.path.join("$QT4DIR","include","qt4","QtAssistant")])
        # moc needs the same include paths as the compiler.
        self["QT4_MOCCPPPATH"] = self["CPPPATH"]
        return
    else :
        print "CHECK ME. SHOULDN'T"
        Exit(1)
def exists(env):
    # Tool availability hook for SCons: the qt4 tool exists when a Qt4
    # installation can be detected (see _detect).
    return _detect(env)
| Python |
#!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
    import subprocess
    def _python_cmd(*args):
        # Run the current interpreter with *args*; True on exit code 0.
        args = (sys.executable,) + args
        return subprocess.call(args) == 0
except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Distribute version fetched when none (or an older one) is installed.
DEFAULT_VERSION = "0.6.10"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake 'setuptools' metadata written over a
# real setuptools installation (see _fake_setuptools).
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Unpack *tarball* into a temp directory and run ``setup.py install``."""
    import shutil
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        try:
            _extractall(tar)
        finally:
            tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        os.chdir(old_wd)
        # fix: the temporary extraction directory used to be leaked
        shutil.rmtree(tmpdir, ignore_errors=True)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build a distribute egg into *to_dir*.

    Raises IOError when the expected *egg* file does not exist afterwards.
    """
    import shutil
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        try:
            _extractall(tar)
        finally:
            tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        # fix: the temporary extraction directory used to be leaked
        shutil.rmtree(tmpdir, ignore_errors=True)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a distribute egg for *version* exists in *to_dir* (building
    it from the downloaded tarball if needed), then import setuptools
    from that egg."""
    egg_name = 'distribute-%s-py%d.%d.egg' % (
        version, sys.version_info[0], sys.version_info[1])
    egg = os.path.join(to_dir, egg_name)
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    # Make the freshly built egg win over anything already on sys.path.
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure a distribute version >= *version* is importable, downloading
    and building an egg when necessary.  With no_fake=False an existing
    plain-setuptools installation is first replaced by a fake one."""
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # Only distribute's pkg_resources carries '_distribute'; a
            # plain setuptools is treated like a missing installation.
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # Too late to swap implementations in this process.
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources'] # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # urlopen moved between Python 2 and 3.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _patch_file(path, content):
    """Will backup the file then patch it.

    Returns False when the file already holds *content*, True after a
    successful patch (the original is renamed aside first).
    """
    f = open(path)
    try:
        existing_content = f.read()
    finally:
        f.close()  # fix: the read handle used to be left open
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True
def _same_content(path, content):
    """Return True when the file at *path* already holds *content*."""
    f = open(path)
    try:
        return f.read() == content
    finally:
        f.close()  # fix: the read handle used to be left open
def _no_sandbox(function):
    """Decorator: temporarily disable setuptools' DirectorySandbox so the
    wrapped function may write outside the sandboxed build directory.
    A no-op when setuptools (and hence the sandbox) is not importable."""
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            def violation(*args):
                pass
            # Swap the violation handler for a silent one; the original
            # is stashed on the class so it can be restored below.
            DirectorySandbox._old = DirectorySandbox._violation
            DirectorySandbox._violation = violation
            patched = True
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox
@_no_sandbox
def _rename_path(path):
    """Move *path* aside by renaming it with a timestamped .OLD suffix;
    returns the new name."""
    backup = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, backup)
    os.rename(path, backup)
    return backup
def _remove_flat_installation(placeholder):
    """Patch a flat (non-egg) setuptools installation out of the way.

    Returns True when the installation was patched and its files moved
    aside, False when there was nothing to do or it was already patched.
    """
    if not os.path.isdir(placeholder):
        log.warn('Unknown installation at %s', placeholder)  # fix: 'Unkown' typo
        return False
    found = False
    # fix: loop variable renamed from 'file' (shadowed the builtin)
    for entry in os.listdir(placeholder):
        if fnmatch.fnmatch(entry, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return
    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, entry)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
def _after_install(dist):
    """Post-install hook: write the fake setuptools metadata into the
    freshly installed purelib directory."""
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
@_no_sandbox
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake setuptools .egg-info plus a setuptools.pth pointing
    to it inside *placeholder* (the install's purelib directory), so
    tools believe setuptools SETUPTOOLS_FAKED_VERSION is installed."""
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        # A previous run already created the fake metadata.
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a stub carrying the fake
    setuptools PKG-INFO; returns False when it is already patched."""
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info) and _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
        log.warn('%s already patched.', pkg_info)
        return False
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    f = open(os.path.join(path, 'EGG-INFO', 'PKG-INFO'), 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True
def _before_install():
    """Pre-install hook: neutralise any plain setuptools installation."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
    """Return True when *location* falls under the install destination.

    Inspects the 'install' command-line options: --root=X / --prefix=X
    (either '=' or separate-argument form) and --user.  Defaults to True
    when no destination option constrains the location.
    """
    if 'install' not in sys.argv:
        return True
    args = sys.argv[sys.argv.index('install')+1:]
    for index, arg in enumerate(args):
        # fix: the '--user' test previously compared against the loop's
        # option variable ('--root'/'--prefix') and could never match.
        if arg == '--user' and USER_SITE is not None:
            return location.startswith(USER_SITE)
        for option in ('--root', '--prefix'):
            if arg.startswith('%s=' % option):
                # fix: was arg.split('root='), which did not strip '--prefix='
                top_dir = arg.split('%s=' % option)[-1]
                return location.startswith(top_dir)
            elif arg == option:
                # fix: guard against the option being the last argument
                if index + 1 < len(args):
                    top_dir = args[index+1]
                    return location.startswith(top_dir)
    return True
def _fake_setuptools():
    """Locate an installed plain setuptools and replace it with fake
    metadata so distribute can take over, then relaunch the command."""
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        # replacement=False excludes distribute itself from the match.
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-run the current command in a fresh interpreter (so the patched
    metadata is picked up) and exit with its return code."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall; *self* is the open TarFile instance.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448 # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # Python 2.3's list.sort() takes a comparison function, no 'key'.
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    # Done depth-first (reverse name order) so children keep their parents'
    # metadata intact.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    *argv* is accepted for script-entry compatibility; *version* selects
    the distribute release to download and install.
    """
    # fix: the requested version used to be ignored (download_setuptools
    # was called without arguments and always fetched DEFAULT_VERSION)
    tarball = download_setuptools(version)
    _install(tarball)
# Script entry point: install/upgrade distribute with the CLI arguments.
if __name__ == '__main__':
    main(sys.argv[1:])
| Python |
# dataelem.py
"""Define the DataElement class - elements within a dataset.
DataElements have a DICOM value representation VR, a value multiplicity VM,
and a value.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
from __future__ import absolute_import
import sys
from dicom import in_py3
import logging
logger = logging.getLogger('pydicom')
from dicom.datadict import dictionary_has_tag, dictionary_description
from dicom.datadict import private_dictionary_description, dictionaryVR
from dicom.tag import Tag
from dicom.UID import UID
from dicom.valuerep import IS, DS, PersonName
from decimal import Decimal
from collections import namedtuple
# os.stat is only available on Unix and Windows
# Not sure if on other platforms the import fails, or the call to it??
stat_available = True
try:
    from os import stat
except ImportError:
    # Narrowed from a bare 'except:': only a failed import means the
    # platform lacks os.stat; anything else should propagate.
    stat_available = False
import os.path
from dicom.filebase import DicomFile
import warnings
# Helper functions:
def isMultiValue(value):
    """Helper function: return True if 'value' is 'list-like'."""
    # Strings are iterable, but DICOM treats them as single values.
    if isString(value):
        return False
    # Anything else that supports iteration counts as multi-valued.
    try:
        iter(value)
    except TypeError:
        return False
    else:
        return True
def isString(val):
    """Helper function: return True if val is a string."""
    # Python 3 has no basestring; test str and bytes instead.
    if in_py3:
        return isinstance(val, (str, bytes))
    return isinstance(val, basestring)
def isStringOrStringList(val):
    """Return true if val consists only of strings. val may be a list/tuple."""
    if not isMultiValue(val):
        # Single value -- just test it directly.
        return isString(val)
    # Multi-valued: every item must itself be a string.
    for item in val:
        if not isString(item):
            return False
    return True
_backslash = "\\"  # an escaped single backslash: the DICOM multi-value delimiter
class DataElement(object):
    """Contain and manipulate a Dicom data element, having a tag, VR, VM and value.
    Most user code will not create data elements using this class directly,
    but rather through 'named tags' in Dataset objects.
    See the Dataset class for a description of how Datasets, Sequences,
    and DataElements work.
    Class Data
    ----------
    For string display (via __str__), the following are used:
    descripWidth -- maximum width of description field (default 35).
    maxBytesToDisplay -- longer data will display "array of # bytes" (default 16).
    showVR -- True (default) to include the dicom VR just before the value.
    """
    # Display tuning knobs; class-level so they can be overridden globally.
    descripWidth = 35
    maxBytesToDisplay = 16
    showVR = 1
    def __init__(self, tag, VR, value, file_value_tell=None,
                 is_undefined_length=False):
        """Create a data element instance.
        Most user code should instead use DICOM keywords, (formerly 'Named tags'
        in pydicom) to create data_elements, for which only the value is supplied,
        and the VR and tag are determined from the dicom dictionary.
        tag -- dicom (group, element) tag in any form accepted by Tag().
        VR -- dicom value representation (see DICOM standard part 6)
        value -- the value of the data element. One of the following:
            - a single string value
            - a number
            - a list or tuple with all strings or all numbers
            - a multi-value string with backslash separator
        file_value_tell -- used internally by Dataset, to store the write
            position for ReplaceDataElementValue method
        is_undefined_length -- used internally to store whether the length
            field in this data element was 0xFFFFFFFFL, i.e. "undefined length"
        """
        self.tag = Tag(tag)
        self.VR = VR  # Note!: you must set VR before setting value
        self.value = value
        self.file_tell = file_value_tell
        self.is_undefined_length = is_undefined_length
    @property
    def value(self):
        """The value (possibly multiple values) of this data_element"""
        return self._value
    @value.setter
    def value(self, val):
        """Set method for 'value' property"""
        # Check if is a string with multiple values separated by '\'
        # If so, turn them into a list of separate strings.
        # VRs in the exclusion list may legitimately contain backslash
        # bytes/characters, so those are never split.
        if isString(val) and self.VR not in \
            ['UT', 'ST', 'LT', 'FL', 'FD', 'AT', 'OB', 'OW', 'OF', 'SL', 'SQ', 'SS',
             'UL', 'OB/OW', 'OW/OB', 'OB or OW', 'OW or OB', 'UN'] and 'US' not in self.VR:  # latter covers 'US or SS' etc
            if _backslash in val:
                val = val.split(_backslash)
        self._value = self._convert_value(val)
    @property
    def VM(self):
        """The number of values in the data_element's 'value'"""
        if isMultiValue(self.value):
            return len(self.value)
        else:
            return 1
    def _convert_value(self, val):
        """Convert Dicom string values if possible to e.g. numbers. Handle the case
        of multiple value data_elements"""
        if self.VR == 'SQ':  # a sequence - leave it alone
            from dicom.sequence import Sequence
            if isinstance(val, Sequence):
                return val
            else:
                return Sequence(val)
        # if the value is a list, convert each element
        # (duck-typed: anything with .append is treated as a list)
        try:
            val.append
        except AttributeError:  # not a list
            return self._convert(val)
        else:
            returnvalue = []
            for subval in val:
                returnvalue.append(self._convert(subval))
            return returnvalue
    def _convert(self, val):
        """Take the value and convert to number, etc if possible"""
        if self.VR == 'IS':
            return IS(val)
        elif self.VR == 'DS':
            return DS(val)
        elif self.VR == "UI":
            return UID(val)
        # Later may need this for PersonName as for UI,
        # but needs more thought
        # elif self.VR == "PN":
        #    return PersonName(val)
        else:  # is either a string or a type 2 optionally blank string
            return val  # this means a "numeric" value could be empty string ""
        # except TypeError:
        #    print "Could not convert value '%s' to VR '%s' in tag %s" \
        #    % (repr(val), self.VR, self.tag)
        # except ValueError:
        #    print "Could not convert value '%s' to VR '%s' in tag %s" \
        #    % (repr(val), self.VR, self.tag)
    def __str__(self):
        """Return str representation of this data_element"""
        repVal = self.repval
        if self.showVR:
            s = "%s %-*s %s: %s" % (str(self.tag), self.descripWidth,
                                    self.description()[:self.descripWidth], self.VR, repVal)
        else:
            s = "%s %-*s %s" % (str(self.tag), self.descripWidth,
                                self.description()[:self.descripWidth], repVal)
        return s
    @property
    def repval(self):
        """Return a str representation of the current value for use in __str__"""
        # Binary/ambiguous VRs with long values: summarize instead of dumping bytes.
        if (self.VR in ['OB', 'OW', 'OW/OB', 'OW or OB', 'OB or OW', 'US or SS or OW', 'US or SS']
                and len(self.value) > self.maxBytesToDisplay):
            repVal = "Array of %d bytes" % len(self.value)
        elif hasattr(self, 'original_string'):  # for VR of IS or DS
            repVal = repr(self.original_string)
        elif isinstance(self.value, Decimal):
            repVal = repr(self.value)
        elif isinstance(self.value, UID):
            # Show the human-readable UID name rather than the dotted number.
            repVal = self.value.name
        else:
            repVal = repr(self.value)  # will tolerate unicode too
        return repVal
    def __unicode__(self):
        """Return unicode representation of this data_element"""
        if isinstance(self.value, unicode):
            # start with the string rep then replace the value part with the unicode
            strVal = str(self)
            uniVal = unicode(strVal.replace(self.repval, "")) + self.value
            return uniVal
        else:
            return unicode(str(self))
    def __getitem__(self, key):
        """Returns the item from my value's Sequence, if it is one."""
        try:
            return self.value[key]
        except TypeError:
            raise TypeError("DataElement value is unscriptable (not a Sequence)")
    @property
    def name(self):
        # Convenience alias for description().
        return self.description()
    def description(self):
        """Return the DICOM dictionary description for this dicom tag."""
        if dictionary_has_tag(self.tag):
            name = dictionary_description(self.tag)
        elif self.tag.is_private:
            name = "Private tag data"  # default
            if hasattr(self, 'private_creator'):
                try:
                    # If have name from private dictionary, use it, but
                    # but put in square brackets so is differentiated,
                    # and clear that cannot access it by name
                    name = "[" + private_dictionary_description(self.tag, self.private_creator) + "]"
                except KeyError:
                    pass
            elif self.tag.elem >> 8 == 0:
                # Element 0x00xx within a private group reserves a creator block.
                name = "Private Creator"
        elif self.tag.element == 0:  # implied Group Length dicom versions < 3
            name = "Group Length"
        else:
            name = ""
        return name
    def __repr__(self):
        """Handle repr(data_element)"""
        if self.VR == "SQ":
            return repr(self.value)
        else:
            return str(self)
class DeferredDataElement(DataElement):
    """Subclass of DataElement where value is not read into memory until needed"""
    def __init__(self, tag, VR, fp, file_mtime, data_element_tell, length):
        """Store basic info for the data element but value will be read later
        fp -- DicomFile object representing the dicom file being read
        file_mtime -- last modification time on file, used to make sure
        it has not changed since original read
        data_element_tell -- file position at start of data element,
        (not the start of the value part, but start of whole element)
        """
        self.tag = Tag(tag)
        self.VR = VR
        self._value = None  # flag as unread
        # Check current file object and save info needed for read later
        self.fp_is_implicit_VR = fp.is_implicit_VR
        self.fp_is_little_endian = fp.is_little_endian
        self.filepath = fp.name
        self.file_mtime = file_mtime
        self.data_element_tell = data_element_tell
        self.length = length
    @property
    def repval(self):
        """Display summary; avoids triggering a file read just to print."""
        if self._value is None:
            return "Deferred read: length %d" % self.length
        else:
            return DataElement.repval.fget(self)
    @property
    def value(self):
        """Get method for 'value' property"""
        # Must now read the value if haven't already
        # NOTE(review): read_value is not defined in this class --
        # presumably provided/patched elsewhere; confirm.
        if self._value is None:
            self.read_value()
        return DataElement.value.fget(self)
    @value.setter
    def value(self, val):
        # Delegate to the base-class setter (handles splitting/conversion).
        DataElement.value.fset(self, val)
# Immutable holder for a data element exactly as read from file, before any
# value conversion (see DataElement_from_raw below).
RawDataElement = namedtuple('RawDataElement',
                            'tag VR length value value_tell is_implicit_VR is_little_endian')
def DataElement_from_raw(raw_data_element):
    """Return a DataElement from a RawDataElement"""
    from dicom.values import convert_value  # XXX buried here to avoid circular import filereader->Dataset->convert_value->filereader (for SQ parsing)
    raw = raw_data_element
    VR = raw.VR
    if VR is None:  # Can be if was implicit VR
        try:
            VR = dictionaryVR(raw.tag)
        except KeyError:
            # Tag not in the public dictionary; pick a sensible fallback.
            if raw.tag.is_private:
                VR = 'OB'  # just read the bytes, no way to know what they mean
            elif raw.tag.element == 0:  # group length tag implied in versions < 3.0
                VR = 'UL'
            else:
                raise KeyError("Unknown DICOM tag {0:s} - can't look up VR".format(str(raw.tag)))
    try:
        value = convert_value(VR, raw)
    except NotImplementedError as e:
        # Add the tag to the conversion error message before re-raising.
        raise NotImplementedError("{0:s} in tag {1!r}".format(str(e), raw.tag))
    # length 0xFFFFFFFF marks "undefined length" in the DICOM stream
    return DataElement(raw.tag, VR, value, raw.value_tell, raw.length == 0xFFFFFFFF)
| Python |
# datadict.py
# -*- coding: utf-8 -*-
"""Access dicom dictionary information"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
import sys
import logging
logger = logging.getLogger("pydicom")
from dicom.tag import Tag
from dicom._dicom_dict import DicomDictionary # the actual dict of {tag: (VR, VM, name, is_retired, keyword), ...}
from dicom._dicom_dict import RepeatersDictionary # those with tags like "(50xx, 0005)"
from dicom._private_dict import private_dictionaries
import warnings
from dicom import in_py3
# Generate mask dict for checking repeating groups etc.
# Map a true bitwise mask to the DICOM mask with "x"'s in it.
masks = {}
for mask_x in RepeatersDictionary:
    # mask1 is XOR'd to see that all non-"x" bits are identical (XOR result = 0 if bits same)
    # then AND those out with 0 bits at the "x" ("we don't care") location using mask2
    # e.g. "50xx0005": mask1 has the literal hex digits with 0 at the "x"
    # nibbles; mask2 has F at fixed nibbles ("F0"[False] == 'F') and 0 at "x".
    mask1 = long(mask_x.replace("x", "0"), 16)
    mask2 = long("".join(["F0"[c == "x"] for c in mask_x]), 16)
    masks[mask_x] = (mask1, mask2)
# For shorter naming of dicom member elements, put an entry here
# (longer naming can also still be used)
# The descriptive name must start with the long version (not replaced if internal)
# (long_name, short_name) pairs used by short_name()/long_name() below.
shortNames = [("BeamLimitingDevice", "BLD"),
              ("RTBeamLimitingDevice", "RTBLD"),
              ("ControlPoint", "CP"),
              ("Referenced", "Refd")
              ]
def mask_match(tag):
    """Return the repeater-dictionary mask key matching tag, or None."""
    # A mask matches when all the fixed (non-"x") bits agree: XOR zeroes
    # identical bits, AND keeps only the bits that must match.
    matching = (key for key, (fixed_bits, care_bits) in masks.items()
                if (tag ^ fixed_bits) & care_bits == 0)
    return next(matching, None)
def get_entry(tag):
    """Return the tuple (VR, VM, name, is_retired, keyword) from the DICOM dictionary
    If the entry is not in the main dictionary, check the masked ones,
    e.g. repeating groups like 50xx, etc.
    """
    tag = Tag(tag)
    if tag in DicomDictionary:
        return DicomDictionary[tag]
    # Not a plain entry; see whether a repeater mask covers this tag.
    mask_x = mask_match(tag)
    if mask_x is None:
        raise KeyError("Tag {0} not found in DICOM dictionary".format(tag))
    return RepeatersDictionary[mask_x]
def dictionary_description(tag):
    """Return the descriptive text for the given dicom tag."""
    entry = get_entry(tag)
    return entry[2]
def dictionaryVM(tag):
    """Return the dicom value multiplicity for the given dicom tag."""
    entry = get_entry(tag)
    return entry[1]
def dictionaryVR(tag):
    """Return the dicom value representation for the given dicom tag."""
    entry = get_entry(tag)
    return entry[0]
def dictionary_has_tag(tag):
    """Return True if the dicom dictionary has an entry for the given tag."""
    return tag in DicomDictionary
def dictionary_keyword(tag):
    """Return the official DICOM standard (since 2011) keyword for the tag"""
    entry = get_entry(tag)
    return entry[4]
# Set up a translation table for "cleaning" DICOM descriptions
# for backwards compatibility pydicom < 0.9.7 (before DICOM keywords)
# Translation is different with unicode - see .translate() at
# http://docs.python.org/library/stdtypes.html#string-methods
chars_to_remove = r""" !@#$%^&*(),;:.?\|{}[]+-="'’/"""
if in_py3:  # i.e. unicode strings
    # str.translate in py3 wants a {codepoint: None} map to delete characters
    translate_table = dict((ord(char), None) for char in chars_to_remove)
else:
    import string
    # py2 str.translate takes an identity table plus a separate
    # deletechars argument (chars_to_remove is passed in CleanName below)
    translate_table = string.maketrans('', '')
def keyword_for_tag(tag):
    """Return the DICOM keyword for the given tag. Replaces old CleanName()
    method using the 2011 DICOM standard keywords instead.
    Will return GroupLength for group length tags,
    and returns empty string ("") if the tag doesn't exist in the dictionary.
    """
    try:
        keyword = dictionary_keyword(tag)
    except KeyError:
        # Unknown tag: the contract is an empty string, not an exception.
        keyword = ""
    return keyword
def CleanName(tag):
    """Return the dictionary descriptive text string but without bad characters.
    Used for e.g. *named tags* of Dataset instances (before DICOM keywords were
    part of the standard)
    """
    tag = Tag(tag)
    if tag not in DicomDictionary:
        if tag.element == 0:  # 0=implied group length in DICOM versions < 3
            return "GroupLength"
        else:
            return ""
    s = dictionary_description(tag)  # Descriptive name in dictionary
    # remove blanks and nasty characters
    if in_py3:
        s = s.translate(translate_table)
    else:
        # py2 form: identity table plus explicit delete-characters argument
        s = s.translate(translate_table, chars_to_remove)
    # Take "Sequence" out of name (pydicom < 0.9.7)
    # e..g "BeamSequence"->"Beams"; "ReferencedImageBoxSequence"->"ReferencedImageBoxes"
    # 'Other Patient ID' exists as single value AND as sequence so check for it and leave 'Sequence' in
    if dictionaryVR(tag) == "SQ" and not s.startswith("OtherPatientIDs"):
        if s.endswith("Sequence"):
            # Pluralize after stripping "Sequence"; the fix-up checks below
            # are order-sensitive, each adjusting the freshly appended "s".
            s = s[:-8] + "s"
            if s.endswith("ss"):
                s = s[:-1]
            if s.endswith("xs"):
                s = s[:-1] + "es"
            if s.endswith("Studys"):
                s = s[:-2] + "ies"
    return s
# Provide for the 'reverse' lookup. Given clean name, what is the tag?
# Built once at import time; tag_for_name() below relies on both tables.
logger.debug("Reversing DICOM dictionary so can look up tag from a name...")
NameDict = dict([(CleanName(tag), tag) for tag in DicomDictionary])
keyword_dict = dict([(dictionary_keyword(tag), tag) for tag in DicomDictionary])
def short_name(name):
    """Return a short *named tag* for the corresponding long version.
    Return a blank string if there is no short version of the name.
    """
    for long_form, short_form in shortNames:
        if not name.startswith(long_form):
            continue
        return name.replace(long_form, short_form)
    return ""
def long_name(name):
    """Return a long *named tag* for the corresponding short version.
    Return a blank string if there is no long version of the name.
    """
    for long_form, short_form in shortNames:
        if not name.startswith(short_form):
            continue
        return name.replace(short_form, long_form)
    return ""
def tag_for_name(name):
    """Return the dicom tag corresponding to name, or None if none exist."""
    # Official DICOM keyword lookup first -- the usual case.
    tag = keyword_dict.get(name)
    if tag is not None:
        return tag
    # If not an official keyword, check the old style pydicom names.
    if name in NameDict:
        tag = NameDict[name]
        msg = ("'%s' as tag name has been deprecated; use official DICOM keyword '%s'"
               % (name, dictionary_keyword(tag)))
        warnings.warn(msg, DeprecationWarning)
        return tag
    # Finally, maybe a short-form name: expand it and retry the old names.
    longname = long_name(name)
    if longname:
        return NameDict.get(longname, None)
    return None
def all_names_for_tag(tag):
    """Return a list of all (long and short) names for the tag"""
    longname = keyword_for_tag(tag)
    shortname = short_name(longname)
    # The short form is included only when one actually exists.
    if shortname:
        return [longname, shortname]
    return [longname]
# PRIVATE DICTIONARY handling
# functions in analogy with those of main DICOM dict
def get_private_entry(tag, private_creator):
    """Return the tuple (VR, VM, name, is_retired) from a private dictionary"""
    tag = Tag(tag)
    try:
        private_dict = private_dictionaries[private_creator]
    except KeyError:
        raise KeyError("Private creator {0} not in private dictionary".format(private_creator))
    # private elements are usually agnostic for "block" (see PS3.5-2008 7.8.1 p44)
    # Some elements in _private_dict are explicit; most have "xx" for high-byte of element
    # Try exact key first, but then try with "xx" in block position
    try:
        dict_entry = private_dict[tag]
    except KeyError:
        # so here put in the "xx" in the block position for key to look up
        # e.g. tag (0041,1010) becomes the key "0041xx10"
        group_str = "%04x" % tag.group
        elem_str = "%04x" % tag.elem
        key = "%sxx%s" % (group_str, elem_str[-2:])
        if key not in private_dict:
            raise KeyError("Tag {0} not in private dictionary for private creator {1}".format(key, private_creator))
        dict_entry = private_dict[key]
    return dict_entry
def private_dictionary_description(tag, private_creator):
    """Return the descriptive text for the given dicom tag."""
    entry = get_private_entry(tag, private_creator)
    return entry[2]
def private_dictionaryVM(tag, private_creator):
    """Return the dicom value multiplicity for the given dicom tag."""
    entry = get_private_entry(tag, private_creator)
    return entry[1]
def private_dictionaryVR(tag, private_creator):
    """Return the dicom value representation for the given dicom tag."""
    entry = get_private_entry(tag, private_creator)
    return entry[0]
| Python |
# valuerep.py
"""Special classes for DICOM value representations (VR)"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from decimal import Decimal
import dicom.config
from dicom.multival import MultiValue
from dicom import in_py3
default_encoding = "iso8859" # can't import from charset or get circular import
# For reading/writing data elements, these ones have longer explicit VR format
extra_length_VRs = ('OB', 'OW', 'OF', 'SQ', 'UN', 'UT')
# VRs that can be affected by character repertoire in (0008,0005) Specific Character Set
# See PS-3.5 (2011), section 6.1.2 Graphic Characters
text_VRs = ('SH', 'LO', 'ST', 'LT', 'UT') # and PN, but it is handled separately.
class DS(Decimal):
    """Store values for DICOM VR of DS (Decimal String).
    Note: if constructed by an empty string, returns the empty string,
    not an instance of this class.
    """
    def __new__(cls, val):
        """Create an instance of DS object, or return a blank string if one is
        passed in, e.g. from a type 2 DICOM blank value.
        """
        # Store this value here so that if the input string is actually a valid
        # string but decimal.Decimal transforms it to an invalid string it will
        # still be initialized properly
        enforce_length = dicom.config.enforce_valid_values
        # DICOM allows spaces around the string, but python doesn't, so clean it
        if isinstance(val, (str, unicode)):
            val = val.strip()
            # If the input string is actually invalid that we relax the valid
            # value constraint for this particular instance
            # (Decimal's repr may be longer than the <=16-char input)
            if len(val) <= 16:
                enforce_length = False
        if val == '':
            return val  # blank "type 2" value: pass through unchanged
        if isinstance(val, float) and not dicom.config.allow_DS_float:
            msg = ("DS cannot be instantiated with a float value, unless "
                   "config.allow_DS_float is set to True. It is recommended to "
                   "convert to a string instead, with the desired number of digits, "
                   "or use Decimal.quantize and pass a Decimal instance.")
            raise TypeError(msg)
        if not isinstance(val, Decimal):
            val = super(DS, cls).__new__(cls, val)
        # DICOM PS3.5 limits DS to 16 bytes when written out
        if len(str(val)) > 16 and enforce_length:
            msg = ("DS value representation must be <= 16 characters by DICOM "
                   "standard. Initialize with a smaller string, or set config.enforce_valid_values "
                   "to False to override, "
                   "or use Decimal.quantize() and initialize with a Decimal instance.")
            raise OverflowError(msg)
        return val
    def __init__(self, val):
        """Store the original string if one given, for exact write-out of same
        value later. E.g. if set '1.23e2', Decimal would write '123', but DS
        will use the original
        """
        # ... also if user changes a data element value, then will get
        # a different Decimal, as Decimal is immutable.
        if isinstance(val, (str, unicode)):
            self.original_string = val
    def __str__(self):
        # Prefer the exact original string when it fits DICOM's 16-char limit.
        if hasattr(self, 'original_string') and len(self.original_string) <= 16:
            return self.original_string
        else:
            return super(DS, self).__str__()
    def __repr__(self):
        return "'" + str(self) + "'"
class IS(int):
    """Derived class of int. Stores original integer string for exact rewriting
    of the string originally read or stored.
    """
    # Unlikely that str(int) will not be the same as the original, but could happen
    # with leading zeros.
    def __new__(cls, val):
        """Create instance if new integer string"""
        # A blank string is a valid "type 2" empty value: return it unchanged.
        if isinstance(val, (str, unicode)) and val.strip() == '':
            return ''
        newval = super(IS, cls).__new__(cls, val)
        # check if a float or Decimal passed in, then could have lost info,
        # and will raise error. E.g. IS(Decimal('1')) is ok, but not IS(1.23)
        if isinstance(val, (float, Decimal)) and newval != val:
            raise TypeError("Could not convert value to integer without loss")
        # Checks in case underlying int is >32 bits, DICOM does not allow this
        if (newval < -2 ** 31 or newval >= 2 ** 31) and dicom.config.enforce_valid_values:
            message = "Value exceeds DICOM limits of -2**31 to (2**31 - 1) for IS"
            raise OverflowError(message)
        return newval
    def __init__(self, val):
        # If a string passed, then store it for exact round-trip on write-out
        if isinstance(val, (str, unicode)):
            self.original_string = val
    def __repr__(self):
        if hasattr(self, 'original_string'):
            return "'" + self.original_string + "'"
        else:
            return "'" + int.__str__(self) + "'"
def MultiString(val, valtype=str):
    """Split a bytestring by delimiters if there are any
    val -- DICOM bytestring to split up
    valtype -- default str, but can be e.g. UID to overwrite to a specific type
    """
    # Remove trailing blank used to pad to even length
    # 2005.05.25: also check for trailing 0, error made in PET files we are converting
    if val and val.endswith((b' ', b'\x00')):
        val = val[:-1]
    # Under Python 3 the raw value arrives as bytes; decode before splitting.
    if in_py3 and isinstance(val, bytes):
        val = val.decode(default_encoding)
    # Empty pieces stay as-is (blank "type 2" values are not converted).
    pieces = val.split("\\")
    converted = [valtype(piece) if piece else piece for piece in pieces]
    if len(converted) == 1:
        return converted[0]
    return MultiValue(valtype, converted)
class PersonNameBase(object):
    """Base class for Person Name classes"""
    def __init__(self, val):
        """Initialize the PN properties"""
        # Note normally use __new__ on subclassing an immutable, but here we just want
        # to do some pre-processing for properties
        # PS 3.5-2008 section 6.2 (p.28) and 6.2.1 describes PN. Briefly:
        # single-byte-characters=ideographic characters=phonetic-characters
        # (each with?):
        # family-name-complex^Given-name-complex^Middle-name^name-prefix^name-suffix
        self.parse()
    def formatted(self, format_str):
        """Return a formatted string according to the format pattern
        Use "...%(property)...%(property)..." where property is one of
        family_name, given_name, middle_name, name_prefix, name_suffix
        """
        return format_str % self.__dict__
    def parse(self):
        """Break down the components and name parts"""
        # Subclasses also inherit str/unicode, so 'self' itself is splittable.
        self.components = self.split("=")
        nComponents = len(self.components)
        self.single_byte = self.components[0]
        self.ideographic = ''
        self.phonetic = ''
        if nComponents > 1:
            self.ideographic = self.components[1]
        if nComponents > 2:
            self.phonetic = self.components[2]
        if self.single_byte:
            # Pad with "^" so split always yields all five name parts.
            name_string = self.single_byte + "^^^^"  # in case missing trailing items are left out
            parts = name_string.split("^")[:5]
            (self.family_name, self.given_name, self.middle_name,
             self.name_prefix, self.name_suffix) = parts
        else:
            # No single-byte component: all five name parts are empty.
            (self.family_name, self.given_name, self.middle_name,
             self.name_prefix, self.name_suffix) = ('', '', '', '', '')
class PersonName(PersonNameBase, str):
    """Human-friendly class to hold VR of Person Name (PN)
    Name is parsed into the following properties:
    single-byte, ideographic, and phonetic components (PS3.5-2008 6.2.1)
    family_name,
    given_name,
    middle_name,
    name_prefix,
    name_suffix
    """
    def __new__(cls, val):
        """Return instance of the new class"""
        # Check if trying to convert a string that has already been converted
        # (idempotent: PersonName(PersonName(x)) returns the same object)
        if isinstance(val, PersonName):
            return val
        return super(PersonName, cls).__new__(cls, val)
    def family_comma_given(self):
        """Return name as 'Family-name, Given-name'"""
        return self.formatted("%(family_name)s, %(given_name)s")
    # def __str__(self):
    #     return str(self.byte_string)
    #     XXX need to process the ideographic or phonetic components?
    # def __len__(self):
    #     return len(self.byte_string)
class PersonNameUnicode(PersonNameBase, unicode):
    """Unicode version of Person Name"""
    def __new__(cls, val, encodings):
        """Return unicode string after conversion of each part
        val -- the PN value to store
        encodings -- a list of python encodings, generally found
        from dicom.charset.python_encodings mapping
        of values in DICOM data element (0008,0005).
        """
        from dicom.charset import clean_escseq  # in here to avoid circular import
        # XXX At this point we should allow unicode or bytes as input, but if
        # it is in unicode we will have to convert it to re-encode it later
        if in_py3 and isinstance(val, str):
            val = bytes(val, default_encoding)
        # Make the possible three character encodings explicit:
        # one per PN component (single-byte, ideographic, phonetic)
        if not isinstance(encodings, list):
            encodings = [encodings] * 3
        if len(encodings) == 2:
            encodings.append(encodings[1])
        components = val.split(b"=")
        # Remove the first encoding if only one component is present
        if (len(components) == 1):
            del encodings[0]
        # Decode each component with its own encoding, stripping escape seqs.
        unicomponents = [clean_escseq(
            unicode(components[i], encodings[i]), encodings)
            for i, component in enumerate(components)]
        new_val = u"=".join(unicomponents)
        return unicode.__new__(cls, new_val)
    def __init__(self, val, encodings):
        self.encodings = encodings
        PersonNameBase.__init__(self, val)
    def family_comma_given(self):
        """Return name as 'Family-name, Given-name'"""
        # BUG FIX: was "%(family_name)u, %(given_name)u". The obsolete %u
        # conversion requires a number and raises TypeError for the string
        # name parts; use %s, consistent with PersonName.family_comma_given.
        return self.formatted("%(family_name)s, %(given_name)s")
| Python |
# run_tests.py
"""Call all the unit test files in the test directory starting with 'test'"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os
import os.path
import sys
import unittest
# Get the directory test_dir where the test scripts are
from pkg_resources import Requirement, resource_filename
test_dir = resource_filename(Requirement.parse("pydicom"), "dicom/test")
class MyTestLoader(object):
    """Loader that builds a suite from every test*.py file in test_dir."""
    def loadTestsFromNames(self, *args):
        """Discover and load all dicom.test.test_* modules as one suite."""
        # Simplest to change to directory where test_xxx.py files are;
        # restore the caller's working directory before returning.
        previous_dir = os.getcwd()
        if test_dir:
            os.chdir(test_dir)
        module_names = [name[:-3] for name in os.listdir(".")
                        if name.startswith("test") and name.endswith(".py")]
        # Load all the tests into a single suite.
        suite = unittest.TestSuite()
        for module_name in module_names:
            dotted_name = "dicom.test." + module_name
            loaded = unittest.defaultTestLoader.loadTestsFromName(dotted_name)
            suite.addTest(loaded)
        os.chdir(previous_dir)
        return suite
if __name__ == "__main__":
    # Get the tests -- in format used by Distribute library
    # to run under 'python setup.py test'
    suite = MyTestLoader().loadTestsFromNames()
    # Run the tests; "-v"/"--verbose" raises unittest verbosity to 2.
    verbosity = 1
    args = sys.argv
    if len(args) > 1 and (args[1] == "-v" or args[1] == "--verbose"):
        verbosity = 2
    runner = unittest.TextTestRunner(verbosity=verbosity)
    # Switch directories to test DICOM files, used by many of the tests
    save_dir = os.getcwd()
    testfiles_dir = resource_filename(Requirement.parse("pydicom"),
                                      "dicom/testfiles")
    os.chdir(testfiles_dir)
    runner.run(suite)
    # Restore the original working directory afterwards.
    os.chdir(save_dir)
| Python |
# version_dep.py
"""Holds test code that is dependent on certain python versions"""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import warnings
def capture_warnings(function, *func_args, **func_kwargs):
    """Capture function result and warnings.

    Returns (result, messages) where messages is a list of the string
    messages of every warning the call emitted.
    """
    with warnings.catch_warnings(record=True) as caught:
        # "always": record every warning, even duplicates already seen.
        warnings.simplefilter("always")
        result = function(*func_args, **func_kwargs)
    messages = [str(warning.message) for warning in caught]
    return result, messages
| Python |
# warncheck.py
#
import warnings
import unittest
from sys import version_info
from dicom.test.version_dep import capture_warnings
def assertWarns(self, warn_msg, function, *func_args, **func_kwargs):
    """
    Check that the function generates the expected warning
    with the arguments given.
    warn_msg -- part of the warning string, any warnings should contain this
    function -- the function to call (expected to issue a warning)
    func_args -- positional arguments to the function
    func_kwargs -- keyword arguments to the function
    Return the function return value.
    """
    result, all_warnings = capture_warnings(function, *func_args,
                                            **func_kwargs)
    num_caught = len(all_warnings)
    # Exactly one warning must have been issued ...
    self.assertTrue(num_caught == 1,
                    "Expected one warning; got {0:d}".format(num_caught))
    # ... and its message must contain the expected substring.
    self.assertTrue(warn_msg in all_warnings[0],
                    "Expected warning message '{0:s}...'; got '{1:s}'".format(
                        warn_msg, all_warnings[0]))
    return result
def test_warning(the_warning):
if the_warning:
warnings.warn(the_warning)
class WarnTests(unittest.TestCase):
    """Self-test for the assertWarns helper above."""
    def testWarn(self):
        """Test that assertWarns works as expected"""
        # test_warning("Look out") issues one warning containing "Look".
        assertWarns(self, "Look", test_warning, "Look out")
if __name__ == "__main__":
    # Run this module's self-tests when executed directly.
    unittest.main()
| Python |
# __init__.py
| Python |
# raw_convert_test.py
"""Try reading a large RTSTRUCT file, profiling how much time it takes"""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os.path
import os
import sys
# EDIT THIS SECTION --------------------------
# to point to local temp directory
# NOTE(review): 'tempfile' shadows the stdlib module of the same name;
# rename if the tempfile module is ever needed in this script.
tempfile = "/tmp/pydicom_stats"
read_filename = r"/Users/darcy/hg/pydicom/source/dicom/testfiles/RStest.dcm"
write_filename = "/tmp/write_test.dcm"
import dicom
from io import BytesIO
import cProfile
import pstats
import sys
def test_full_read(filename):
    """Read a complete DICOM file and return the resulting dataset."""
    return dicom.read_file(filename)
def test_convert_from_raw(dataset):
s = str(dataset)
def test_write_file(dataset, write_filename):
    """Write 'dataset' to 'write_filename' to profile save_as performance."""
    dataset.save_as(write_filename)
if __name__ == "__main__":
    # Profile each stage separately: full read, raw->element conversion, write.
    runs = ['ds=test_full_read(read_filename)',
            'test_convert_from_raw(ds)',
            'test_write_file(ds, write_filename)',
            ]
    for testrun in runs:
        cProfile.run(testrun, tempfile)
        p = pstats.Stats(tempfile)
        print "---------------"
        print testrun
        print "---------------"
        # Show the 8 most expensive functions by own ("time") cost.
        p.strip_dirs().sort_stats('time').print_stats(8)
# Clear disk cache for next run?
# import sys
# if not on_windows:
# prompt= "Run purge command (linux/Mac OS X) to clear disk cache?(N):"
# answer = raw_input(prompt)
# if answer.lower() == "y":
# print "Running 'purge'. Please wait..."
# os.system("purge")
| Python |
# time_test.py
"""Try reading large sets of files, profiling how much time it takes"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os.path
import os
import sys
on_windows = sys.platform.startswith("win")
# EDIT THIS SECTION --------------------------
# to point to local temp directory, and to a set of >400 DICOM files of same size to work on
# I used images freely available from http://pcir.org
# NOTE(review): 'tempfile' shadows the stdlib module of the same name.
if on_windows:
    tempfile = "c:/temp/pydicom_stats"
    location_base = r"z:/testdicom/"
else:
    tempfile = "/tmp/pydicom_stats"
    location_base = r"/Users/darcy/testdicom/"
    # location_base = r"/Volumes/Disk 1/testdicom/"  # Network disk location
locations = ["77654033_19950903/77654033/19950903/CT2/",
             "98890234_20010101/98890234/20010101/CT5/",
             "98890234_20010101/98890234/20010101/CT6/",
             "98890234_20010101/98890234/20010101/CT7/",
             ]
# Make each location an absolute path under location_base.
locations = [os.path.join(location_base, location) for location in locations]
# -------------------------------------------------------
import glob
import dicom
from dicom.filereader import read_partial, _at_pixel_data
from io import BytesIO
from time import time
import cProfile
import pstats
import sys
import random
rp = read_partial  # short alias for the profiled statements below
# Collect every non-hidden file from each series directory
filenames = []
for location in locations:
    loc_list = glob.glob(os.path.join(location, "*"))
    filenames.extend((x for x in loc_list if not x.startswith(".")))
assert len(filenames) >= 400, "Need at least 400 files"  # unless change slices below
print
random.shuffle(filenames)  # to make sure no bias for any particular file
print "Sampling from %d files" % len(filenames), ". Each test gets 100 distinct files"
print "Test order is randomized too..."
# Give each test its own set of files, to avoid reading something in cache
# from a previous test
filenames1 = filenames[:100]  # keep the time to a reasonable amount (~2-25 sec)
filenames2 = filenames[100:200]
filenames3 = filenames[200:300]
filenames4 = filenames[300:400]
def test_full_read():
    """Fully parse the first batch of files straight from disk."""
    read = dicom.read_file
    return [read(fn) for fn in filenames1]
def test_partial():
    """Parse batch 2 only up to (not including) the pixel data.

    Fixed: open each file with a context manager so handles are closed
    deterministically instead of leaking until garbage collection (the old
    ``open(fn, 'rb')`` inside the comprehension never closed them).
    """
    ds = []
    for fn in filenames2:
        with open(fn, 'rb') as f:
            ds.append(read_partial(f, stop_when=_at_pixel_data))
def test_mem_read_full():
    """Fully parse batch 3 from in-memory copies (no disk I/O while parsing).

    Fixed: close each source file after slurping its contents rather than
    leaking the handle (the old generator's ``open(fn, 'rb').read()`` relied
    on the GC to close files).
    """
    rf = dicom.read_file
    memory_files = []
    for fn in filenames3:
        with open(fn, 'rb') as f:
            memory_files.append(BytesIO(f.read()))
    ds = [rf(memory_file) for memory_file in memory_files]
def test_mem_read_small():
    """Parse batch 4 from only the first 4000 bytes of each file, in memory.

    Fixed: close each source file after reading the prefix rather than
    leaking the handle.
    """
    rf = dicom.read_file
    memory_files = []
    for fn in filenames4:
        with open(fn, 'rb') as f:
            memory_files.append(BytesIO(f.read(4000)))  # headers only
    ds = [rf(memory_file) for memory_file in memory_files]
def test_python_read_files():
    """Baseline: raw Python reads of batch 4, with no DICOM parsing at all.

    Fixed: close each file via a context manager instead of leaking handles.
    """
    all_files = []
    for fn in filenames4:
        with open(fn, 'rb') as f:
            all_files.append(f.read())
if __name__ == "__main__":
    # Statement strings executed (and profiled) in this module's namespace
    runs = ['datasets=test_full_read()',
            # 'test_partial()',
            # 'test_mem_read_full()',
            # 'test_mem_read_small()',
            'test_python_read_files()',
            ]
    random.shuffle(runs)  # remove ordering bias (e.g. disk-cache warm-up)
    for testrun in runs:
        # Dump raw profiler stats to 'tempfile', then print the top 5 entries
        cProfile.run(testrun, tempfile)
        p = pstats.Stats(tempfile)
        print "---------------"
        print testrun
        print "---------------"
        p.strip_dirs().sort_stats('time').print_stats(5)
    print "Confirming file read worked -- check for data elements near end"
    try:
        # 'datasets' was bound by the profiled 'datasets=test_full_read()' run
        image_sizes = [len(ds.PixelData) for ds in datasets]
    except Exception as e:
        print "Failed to access dataset data for all files\nError:" + str(e)
    else:
        print "Reads checked ok."
    # Clear disk cache for next run?
    import sys
    if not on_windows:
        prompt = "Run purge command (linux/Mac OS X) to clear disk cache?...(N):"
        answer = raw_input(prompt)
        if answer.lower() == "y":
            print "Running 'purge'. Please wait..."
            os.system("purge")
| Python |
# __init__.py
# Mark the folder as a python package
| Python |
# _write_stds.py
"""Snippets for what a particular dataset (including nested sequences)
should look like after writing in different expl/impl Vr and endian combos,
as well as undefined length sequences and items
"""
# Implicit VR, little endian, SQ's with defined lengths
# Hex rendering (with interleaved byte-offset comments) of a small dataset --
# a patient name plus a two-level nested sequence -- written as implicit VR
# little endian with *defined* sequence/item lengths. Convert to bytes with
# util.hexutil.hex2bytes for comparison in writer tests.
impl_LE_deflen_std_hex = (
    "10 00 10 00 "  # (0010, 0010) Patient's Name
    "0c 00 00 00 "  # length 12
    "4e 61 6d 65 5e 50 61 74 69 65 6e 74 "  # "Name^Patient"
    "06 30 39 00 "  # (3006, 0039) ROI Contour Sequence
    "5a 00 00 00 "  # length 90
    "fe ff 00 e0 "  # (fffe, e000) Item Tag
    "52 00 00 00 "  # length 82
    "06 30 40 00 "  # (3006, 0040) Contour Sequence
    "4a 00 00 00 "  # length 74
    "fe ff 00 e0 "  # (fffe, e000) Item Tag
    "1a 00 00 00 "  # length 26
    "06 30 48 00 "  # (3006, 0048) Contour Number
    "02 00 00 00 "  # length 2
    "31 20 "  # "1 "
    "06 30 50 00 "  # (3006, 0050) Contour Data
    "08 00 00 00 "  # length 8
    "32 5c 34 5c 38 5c 31 36 "  # "2\4\8\16"
    "fe ff 00 e0 "  # (fffe, e000) Item Tag
    "20 00 00 00 "  # length 32
    "06 30 48 00 "  # (3006, 0048) Contour Number
    "02 00 00 00 "  # length 2
    "32 20 "  # "2 "
    "06 30 50 00 "  # (3006, 0050) Contour Data
    "0e 00 00 00 "  # length 14
    "33 32 5c 36 34 5c 31 32 38 5c 31 39 36 20 "
    # "32\64\128\196 "
)
# Implicit VR, big endian, SQ's with defined lengths
# Realized after coding this that there is no Impl VR big endian in DICOM std;
# however, it seems to exist as a GE private transfer syntax.
# Will leave this here for now.
# Big-endian rendering of the same dataset as impl_LE_deflen_std_hex: tags
# and length fields are byte-swapped; element *values* keep their encoding.
# Fixed: the second Item's length-32 field read "20 00 00 00" (little-endian
# byte order, copied from the LE version) -- in this big-endian stream it
# must be "00 00 00 20" like every other length field here.
impl_BE_deflen_std_hex = (
    "00 10 00 10 "  # (0010, 0010) Patient's Name
    "00 00 00 0c "  # length 12
    "4e 61 6d 65 5e 50 61 74 69 65 6e 74 "  # "Name^Patient"
    "30 06 00 39 "  # (3006, 0039) ROI Contour Sequence
    "00 00 00 5a "  # length 90
    "ff fe e0 00 "  # (fffe, e000) Item Tag
    "00 00 00 52 "  # length 82
    "30 06 00 40 "  # (3006, 0040) Contour Sequence
    "00 00 00 4a "  # length 74
    "ff fe e0 00 "  # (fffe, e000) Item Tag
    "00 00 00 1a "  # length 26
    "30 06 00 48 "  # (3006, 0048) Contour Number
    "00 00 00 02 "  # length 2
    "31 20 "  # "1 "
    "30 06 00 50 "  # (3006, 0050) Contour Data
    "00 00 00 08 "  # length 8
    "32 5c 34 5c 38 5c 31 36 "  # "2\4\8\16"
    "ff fe e0 00 "  # (fffe, e000) Item Tag
    "00 00 00 20 "  # length 32
    "30 06 00 48 "  # (3006, 0048) Contour Number
    "00 00 00 02 "  # length 2
    "32 20 "  # "2 "
    "30 06 00 50 "  # (3006, 0050) Contour Data
    "00 00 00 0e "  # length 14
    "33 32 5c 36 34 5c 31 32 38 5c 31 39 36 20 "
    # "32\64\128\196 "
)
| Python |
# multival.py
"""Code for multi-value data elements values, or any list of items that
must all be the same type.
"""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under an MIT-style license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
class MultiValue(list):
    """Class to hold any multi-valued DICOM value, or any list of items
    that are all of the same type.

    This class enforces that any items added to the list are of the correct
    type, by calling the constructor on any items that are added. Therefore,
    the constructor must behave nicely if passed an object that is already
    its type. The constructor should raise TypeError if the item cannot be
    converted.
    """
    def __init__(self, type_constructor, iterable):
        """Initialize the list of values

        :param type_constructor: a constructor for the required type for all
                list items. Could be the class, or a factory function. For
                DICOM multi-value data elements, this will be the class or
                type corresponding to the VR.
        :param iterable: an iterable (e.g. list, tuple) of items to
                initialize the MultiValue list
        """
        self.type_constructor = type_constructor
        super(MultiValue, self).__init__([type_constructor(x) for x in iterable])

    def append(self, val):
        super(MultiValue, self).append(self.type_constructor(val))

    def extend(self, list_of_vals):
        super(MultiValue, self).extend((self.type_constructor(x) for x in list_of_vals))

    def __iadd__(self, other):
        # Fix: list.__iadd__ (``mv += [...]``) extended at the C level and
        # bypassed type enforcement; route it through extend() instead.
        self.extend(other)
        return self

    def insert(self, position, val):
        super(MultiValue, self).insert(position, self.type_constructor(val))

    def __setitem__(self, i, val):
        """Set an item of the list, making sure it is of the right VR type"""
        if isinstance(i, slice):
            val = [self.type_constructor(x) for x in val]
        else:
            val = self.type_constructor(val)
        super(MultiValue, self).__setitem__(i, val)

    def __setslice__(self, i, j, val):
        # Fix (Python 2 only): slice assignment on list subclasses is routed
        # to __setslice__, which skipped type conversion; convert items here
        # too. Never invoked on Python 3, where __setitem__ handles slices.
        super(MultiValue, self).__setslice__(
            i, j, [self.type_constructor(x) for x in val])

    def __str__(self):
        lines = [str(x) for x in self]
        return "[" + ", ".join(lines) + "]"
    __repr__ = __str__
| Python |
# encaps.py
"""Routines for working with encapsulated (compressed) data
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Encapsulated Pixel Data -- 3.5-2008 A.4
# Encapsulated Pixel data is in a number of Items (start with Item tag (0xFFFE,E000) and ending ultimately with SQ delimiter and Item Length field of 0 (no value),
# just like SQ of undefined length, but here Item must have explicit length.
# PixelData length is Undefined Length if encapsulated
# First item is an Offset Table. It can have 0 length and no value, or it can have a table of US pointers to first byte of the Item tag starting each *Frame*,
# where 0 of pointer is at first Item tag following the Offset table
# If a single frame, it may be 0 length/no value, or it may have a single pointer (0).
import logging
logger = logging.getLogger('pydicom')
from dicom.filebase import DicomBytesIO
from dicom.tag import ItemTag, SequenceDelimiterTag
def defragment_data(data):
    """Read encapsulated data and return one continuous string

    data -- string of encapsulated data, typically dataset.PixelData
    Return all fragments concatenated together as a byte string

    If PixelData has multiple frames, then should separate out before
    calling this routine.
    """
    # Wrap the raw bytes so the item-reading routine can parse tags/lengths
    fp = DicomBytesIO(data)
    fp.is_little_endian = True  # DICOM standard requires this
    # The first item is the Basic Offset Table; it is not pixel data itself
    basic_offset_table = read_item(fp)
    # Gather fragments until read_item signals the Sequence Delimiter (None)
    fragments = []
    fragment = read_item(fp)
    while fragment:
        fragments.append(fragment)
        fragment = read_item(fp)
    return "".join(fragments)
# read_item modeled after filereader.ReadSequenceItem
def read_item(fp):
    """Read and return a single Item in the fragmented data stream

    Returns the item's raw bytes, or None when the Sequence Delimiter
    item (end of the fragments) is reached.
    """
    try:
        tag = fp.read_tag()
    except EOFError:  # already read delimiter before passing data here, so should just run out
        return None
    if tag == SequenceDelimiterTag:  # No more items, time for sequence to stop reading
        length = fp.read_UL()
        logger.debug("%04x: Sequence Delimiter, length 0x%x", fp.tell() - 8, length)
        if length != 0:
            logger.warning("Expected 0x00000000 after delimiter, found 0x%x, at data position 0x%x", length, fp.tell() - 4)
        return None
    if tag != ItemTag:
        logger.warning("Expected Item with tag %s at data position 0x%x", ItemTag, fp.tell() - 4)
        length = fp.read_UL()
    else:
        length = fp.read_UL()
        logger.debug("%04x: Item, length 0x%x", fp.tell() - 8, length)
    # Fragment items must have an explicit length; 0xFFFFFFFF is "undefined".
    # Fixes: dropped the Python-2-only 'L' literal suffix, and parenthesized
    # the position arithmetic -- the old '... % fp.tell() - 4' formatted the
    # string first and then tried to subtract 4 from *the string*, raising
    # TypeError instead of the intended ValueError.
    if length == 0xFFFFFFFF:
        raise ValueError("Encapsulated data fragment had Undefined Length at data position 0x%x" % (fp.tell() - 4))
    item_data = fp.read(length)
    return item_data
| Python |
# values.py
"""Functions for converting values of DICOM data elements to proper python types
"""
# Copyright (c) 2010-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import unpack, calcsize, pack
import logging
logger = logging.getLogger('pydicom')
from dicom.valuerep import PersonName, MultiString
from dicom.multival import MultiValue
import dicom.UID
from dicom.tag import Tag, TupleTag, SequenceDelimiterTag
from dicom.datadict import dictionaryVR
from dicom.filereader import read_sequence
from io import BytesIO
from dicom.valuerep import DS, IS
from dicom.charset import default_encoding
from dicom import in_py3
def convert_tag(byte_string, is_little_endian, offset=0):
    """Decode a (group, element) DICOM tag from 4 bytes at *offset*."""
    fmt = "<HH" if is_little_endian else ">HH"
    return TupleTag(unpack(fmt, byte_string[offset:offset + 4]))
def convert_ATvalue(byte_string, is_little_endian, struct_format=None):
    """Read and return AT (tag) data_element value(s)

    A single tag (4 bytes) is returned bare; longer values become a
    MultiValue of tags.
    """
    length = len(byte_string)
    if length == 4:
        return convert_tag(byte_string, is_little_endian)
    # length > 4
    if length % 4 != 0:
        # Fix: the old message interpolated fp.tell(), but no 'fp' exists in
        # this function (values here are converted from byte strings, not
        # files), so hitting this branch raised NameError instead of warning.
        logger.warn("Expected length to be multiple of 4 for VR 'AT', got length %d", length)
    return MultiValue(Tag, [convert_tag(byte_string, is_little_endian, offset=x)
                            for x in range(0, length, 4)])
def convert_DS_string(byte_string, is_little_endian, struct_format=None):
    """Read and return a DS value or list of values"""
    # MultiString splits on backslash and converts each piece to DS
    converted = MultiString(byte_string, valtype=DS)
    return converted
def convert_IS_string(byte_string, is_little_endian, struct_format=None):
    """Read and return an IS value or list of values"""
    # MultiString splits on backslash and converts each piece to IS
    converted = MultiString(byte_string, valtype=IS)
    return converted
def convert_numbers(byte_string, is_little_endian, struct_format):
    """Read a "value" of type struct_format from the dicom file.

    "Value" can be more than one number; a lone number is returned bare,
    several come back as a (mutable) list.
    """
    endian_char = '<' if is_little_endian else '>'
    # "=" means use 'standard' size, needed on 64-bit systems.
    bytes_per_value = calcsize("=" + struct_format)
    length = len(byte_string)
    if length % bytes_per_value != 0:
        logger.warn("Expected length to be even multiple of number size")
    count = length // bytes_per_value
    values = unpack("%c%u%c" % (endian_char, count, struct_format), byte_string)
    if len(values) == 1:
        return values[0]
    # convert from tuple to a list so it can be modified if need be
    return list(values)
def convert_OBvalue(byte_string, is_little_endian, struct_format=None):
    """Return the raw bytes from reading an OB value"""
    # OB is opaque "other byte" data -- pass it through untouched
    return byte_string
def convert_OWvalue(byte_string, is_little_endian, struct_format=None):
    """Return the raw bytes from reading an OW value rep

    Note: pydicom does NOT do byte swapping, except in
    dataset.pixel_array function
    """
    # OW is currently handled exactly like OB (the helper just returned its
    # input); the bytes pass through untouched. Maybe later will have its
    # own routine.
    return byte_string
def convert_PN(byte_string, is_little_endian, struct_format=None):
    """Read and return string(s) as PersonName instance(s)"""
    # MultiString handles backslash-separated multi-valued names
    names = MultiString(byte_string, valtype=PersonName)
    return names
def convert_string(byte_string, is_little_endian, struct_format=None):
    """Read and return a string or strings"""
    # Backslash splits multi-valued strings; MultiString handles that
    result = MultiString(byte_string)
    return result
def convert_single_string(byte_string, is_little_endian, struct_format=None):
    """Read and return a single string (backslash character does not split)"""
    # Strip the single trailing space used to pad text to even length
    if byte_string and byte_string.endswith(b' '):
        byte_string = byte_string[:-1]
    if not in_py3:
        return byte_string
    return byte_string.decode(default_encoding)
def convert_SQ(byte_string, is_implicit_VR, is_little_endian, offset=0):
    """Convert a sequence that has been read as bytes but not yet parsed."""
    # Re-parse the raw bytes with the full sequence reader; offset keeps
    # reported file positions meaningful relative to the original file.
    return read_sequence(BytesIO(byte_string), is_implicit_VR,
                         is_little_endian, len(byte_string), offset)
def convert_UI(byte_string, is_little_endian, struct_format=None):
    """Read and return a UI value or values"""
    # UIDs are padded with a trailing NUL byte to an even length; strip it
    if byte_string and byte_string.endswith(b'\0'):
        byte_string = byte_string[:-1]
    return MultiString(byte_string, dicom.UID.UID)
def convert_UN(byte_string, is_little_endian, struct_format=None):
    """Return a byte string for a VR of 'UN' (unknown)"""
    # Unknown VR: nothing sensible to convert to, so hand back the raw bytes
    return byte_string
def convert_value(VR, raw_data_element):
    """Return the converted value (from raw bytes) for the given VR

    :param VR: two-character DICOM value representation, used to pick the
            conversion routine from the ``converters`` table
    :param raw_data_element: raw element carrying the value bytes plus the
            endianness / implicit-VR flags it was read with
    :raise NotImplementedError: if the VR has no registered converter

    Fixed: removed the dead local ``tag = Tag(raw_data_element.tag)`` --
    it was assigned and never used.
    """
    if VR not in converters:
        raise NotImplementedError("Unknown Value Representation '{0}'".format(VR))
    # A converter entry is either a plain function, or a
    # (function, struct_format) tuple for numeric VRs using convert_numbers
    if isinstance(converters[VR], tuple):
        converter, num_format = converters[VR]
    else:
        converter = converters[VR]
        num_format = None
    byte_string = raw_data_element.value
    is_little_endian = raw_data_element.is_little_endian
    is_implicit_VR = raw_data_element.is_implicit_VR
    # Sequences need extra context (implicit-VR flag and the data position),
    # so they are dispatched separately from the simple converters
    if VR != "SQ":
        value = converter(byte_string, is_little_endian, num_format)
    else:
        value = convert_SQ(byte_string, is_implicit_VR, is_little_endian,
                           raw_data_element.value_tell)
    return value
# converters map a VR to the function to read the value(s).
# for convert_numbers, the converter maps to a tuple (function, struct_format)
# (struct_format in python struct module style)
converters = {'UL': (convert_numbers, 'L'),
              'SL': (convert_numbers, 'l'),
              'US': (convert_numbers, 'H'),
              'SS': (convert_numbers, 'h'),
              'FL': (convert_numbers, 'f'),
              'FD': (convert_numbers, 'd'),
              'OF': (convert_numbers, 'f'),
              'OB': convert_OBvalue,
              'UI': convert_UI,
              'SH': convert_string,
              'DA': convert_string,
              'TM': convert_string,
              'CS': convert_string,
              'PN': convert_PN,
              'LO': convert_string,
              'IS': convert_IS_string,
              'DS': convert_DS_string,
              'AE': convert_string,
              'AS': convert_string,
              'LT': convert_single_string,
              'SQ': convert_SQ,
              'UN': convert_UN,
              'AT': convert_ATvalue,
              'ST': convert_string,
              'OW': convert_OWvalue,
              # ambiguous VRs from the data dictionary: the true VR depends on
              # other data elements we don't know at read time, so pass the
              # raw bytes through
              'OW/OB': convert_OBvalue,
              'OB/OW': convert_OBvalue,
              'OW or OB': convert_OBvalue,
              'OB or OW': convert_OBvalue,
              'US or SS': convert_OWvalue,
              'US or SS or OW': convert_OWvalue,
              'US\\US or SS\\US': convert_OWvalue,
              'DT': convert_string,
              'UT': convert_single_string,
              }
if __name__ == "__main__":
    # import-only module; nothing to run as a script
    pass
| Python |
# charlist.py
"""List summary info for the test files in the charset directory"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import logging
logging.basicConfig(level=logging.INFO,
format='%(message)s')
if __name__ == "__main__":
from glob import glob
import dicom
# Get list of all DICOM files
names = glob("*.dcm")
# Collect summary information from the files
files_info = []
for name in names:
ds = dicom.read_file(name)
ds.decode()
files_info.append((name, ds.SpecificCharacterSet, ds.PatientsName))
# Show the information
format = "%-16s %-40s %s"
logging.info(format % ("Filename", "Character Sets", "Patient's Name"))
logging.info(format % ("--------", "--------------", "--------------"))
for file_info in files_info:
logging.info(format % file_info)
if "chrFrenMulti.dcm" in names:
logging.info("\nOther\n=====")
logging.info(
"chrFrenMulti.dcm is a modified version of chrFren.dcm"
" with multi-valued PN and LO for testing decoding"
)
| Python |
# DicomInfo.py
"""
Read a DICOM file and print some or all of its values.
Usage: python DicomInfo.py imagefile [-v]
-v (optional): Verbose mode, prints all DICOM data elements
Without the -v option, a few of the most common dicom file
data elements are printed: some info about the patient and about
the image.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
import sys
import dicom
# check command line arguments make sense: script name plus 1 or 2 arguments
if not 1 < len(sys.argv) < 4:
    print(__doc__)
    sys.exit()
# read the file
filename = sys.argv[1]
dataset = dicom.read_file(filename)
# Verbose mode: dump every data element and stop
if len(sys.argv) == 3:
    if sys.argv[2] == "-v":  # user asked for all info
        print(dataset)
    else:  # unknown command argument
        print(__doc__)
    sys.exit()
# Normal mode: print a short human-readable summary of common elements
print()
print("Filename.........:", filename)
print("Storage type.....:", dataset.SOPClassUID)
print()
pat_name = dataset.PatientName
display_name = pat_name.family_name + ", " + pat_name.given_name
print("Patient's name...:", display_name)
print("Patient id.......:", dataset.PatientID)
print("Modality.........:", dataset.Modality)
print("Study Date.......:", dataset.StudyDate)
if 'PixelData' in dataset:
    rows = int(dataset.Rows)
    cols = int(dataset.Columns)
    print("Image size.......: {rows:d} x {cols:d}, {size:d} bytes".format(
        rows=rows, cols=cols, size=len(dataset.PixelData)))
    if 'PixelSpacing' in dataset:
        print("Pixel spacing....:", dataset.PixelSpacing)
# use .get() if not sure the item exists, and want a default value if missing
print("Slice location...:", dataset.get('SliceLocation', "(missing)"))
| Python |
# ListBeams.py
"""Given an RTPLAN DICOM file, list basic info for the beams in it
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
import dicom
usage = """python ListBeams.py rtplan.dcm"""
def ListBeams(plan_dataset):
    """Return a string summarizing the RTPLAN beam information in the dataset"""
    header = "{name:^13s} {num:^8s} {gantry:^8s} {ssd:^11s}".format(
        name="Beam name", num="Number", gantry="Gantry", ssd="SSD (cm)")
    summary = [header]
    for beam in plan_dataset.BeamSequence:
        # The first control point carries the initial gantry angle and the
        # source-to-surface distance (stored in mm; shown in cm)
        first_cp = beam.ControlPointSequence[0]
        ssd_cm = float(first_cp.SourcetoSurfaceDistance / 10)
        summary.append("{b.BeamName:^13s} {b.BeamNumber:8d} "
                       "{gantry:8.1f} {ssd:8.1f}".format(
                           b=beam, gantry=first_cp.GantryAngle, ssd=ssd_cm))
    return "\n".join(summary)
if __name__ == "__main__":
    import sys
    # exactly one argument expected: the RTPLAN file path
    if len(sys.argv) != 2:
        print(usage)
        sys.exit(-1)
    rtplan = dicom.read_file(sys.argv[1])
    print(ListBeams(rtplan))
| Python |
# show_charset_name.py
"""Very simple app to display unicode person names"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import Tkinter
from dicom.valuerep import PersonName, PersonNameUnicode
# character set assumed for the single-byte component of each person name
default_encoding = 'iso8859'
root = Tkinter.Tk()
# root.geometry("%dx%d%+d%+d" % (800, 600, 0, 0))
# Sample multi-component person names (alphabetic=ideographic=phonetic),
# each paired with the character sets needed to decode its components.
person_names = [
    PersonNameUnicode(
        """Yamada^Tarou=\033$B;3ED\033(B^\033$BB@O:\033(B=\033$B$d$^$@\033(B^\033$B$?$m$&\033(B""",
        [default_encoding, 'iso2022_jp']),  # DICOM standard 2008-PS3.5 H.3 p 98
    PersonNameUnicode(
        """Wang^XiaoDong=\xcd\xf5\x5e\xd0\xa1\xb6\xab=""",
        [default_encoding, 'GB18030']),  # DICOM standard 2008-PS3.5 J.3 p 105
    PersonNameUnicode(
        """Wang^XiaoDong=\xe7\x8e\x8b\x5e\xe5\xb0\x8f\xe6\x9d\xb1=""",
        [default_encoding, 'UTF-8']),  # DICOM standard 2008-PS3.5 J.1 p 104
    PersonNameUnicode(
        """Hong^Gildong=\033$)C\373\363^\033$)C\321\316\324\327=\033$)C\310\253^\033$)C\261\346\265\277""",
        [default_encoding, 'euc_kr']),  # DICOM standard 2008-PS3.5 I.2 p 101
]
# one Tk label per decoded name
for person_name in person_names:
    label = Tkinter.Label(text=person_name)
    label.pack()
root.mainloop()
| Python |
# anonymize.py
"""Read a dicom file (or directory of files), partially "anonymize" it (them),
by replacing Person names, patient id, optionally remove curves
and private tags, and write result to a new file (directory)
This is an example only; use only as a starting point.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Use at your own risk!!
# Many more items need to be addressed for proper de-identifying DICOM data.
# In particular, note that pixel data could have confidential data "burned in"
# Annex E of PS3.15-2011 DICOM standard document details what must be done to
# fully de-identify DICOM data
from __future__ import print_function
usage = """
Usage:
python anonymize.py dicomfile.dcm outputfile.dcm
OR
python anonymize.py originals_directory anonymized_directory
Note: Use at your own risk. Does not fully de-identify the DICOM data as per
the DICOM standard, e.g in Annex E of PS3.15-2011.
"""
import os
import os.path
import dicom
def anonymize(filename, output_filename, new_person_name="anonymous",
              new_patient_id="id", remove_curves=True, remove_private_tags=True):
    """Replace data element values to partly anonymize a DICOM file.

    Note: completely anonymizing a DICOM file is very complicated; there
    are many things this example code does not address. USE AT YOUR OWN RISK.
    """
    # Callbacks for dataset.walk(), which visits every data element
    # (including those nested inside sequences)
    def blank_person_names(ds, data_element):
        # overwrite every person-name (PN) element with the replacement name
        if data_element.VR == "PN":
            data_element.value = new_person_name

    def delete_curves(ds, data_element):
        # remove curve data, which lives in the repeating groups 50xx
        if data_element.tag.group & 0xFF00 == 0x5000:
            del ds[data_element.tag]

    # Load the current dicom file to 'anonymize'
    dataset = dicom.read_file(filename)

    # Remove patient name and any other person names, then change the ID
    dataset.walk(blank_person_names)
    dataset.PatientID = new_patient_id

    # Delete identifying elements outright (only valid for DICOM type 3
    # optional elements); general loop so more names are easy to add.
    # Could also have done: del ds.OtherPatientIDs, etc.
    for name in ['OtherPatientIDs', 'OtherPatientIDsSequence']:
        if name in dataset:
            delattr(dataset, name)
    # Type 2 elements must remain present, so blank them instead
    for name in ['PatientBirthDate']:
        if name in dataset:
            dataset.data_element(name).value = ''

    # Optionally strip private tags and curve data
    if remove_private_tags:
        dataset.remove_private_tags()
    if remove_curves:
        dataset.walk(delete_curves)

    # write the 'anonymized' DICOM out under the new filename
    dataset.save_as(output_filename)
# Can run as a script:
if __name__ == "__main__":
import sys
if len(sys.argv) != 3:
print(usage)
sys.exit()
arg1, arg2 = sys.argv[1:]
if os.path.isdir(arg1):
in_dir = arg1
out_dir = arg2
if os.path.exists(out_dir):
if not os.path.isdir(out_dir):
raise IOError("Input is directory; output name exists but is not a directory")
else: # out_dir does not exist; create it.
os.makedirs(out_dir)
filenames = os.listdir(in_dir)
for filename in filenames:
if not os.path.isdir(os.path.join(in_dir, filename)):
print(filename + "...", end='')
anonymize(os.path.join(in_dir, filename), os.path.join(out_dir, filename))
print("done\r")
else: # first arg not a directory, assume two files given
in_filename = arg1
out_filename = arg2
anonymize(in_filename, out_filename)
print()
| Python |
# myprint.py
"""Example of printing a dataset in your own format"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
def myprint(dataset, indent=0):
    """Go through all items in the dataset and print them with custom format

    Modelled after Dataset._pretty_str()
    """
    skip_names = ['Pixel Data', 'File Meta Information Version']
    pad = "   " * indent
    child_pad = "   " * (indent + 1)
    for data_element in dataset:
        if data_element.VR == "SQ":
            # Sequence: print its name, then recurse into each item
            print(pad, data_element.name)
            for sequence_item in data_element.value:
                myprint(sequence_item, indent + 1)
                print(child_pad + "---------")
        elif data_element.name in skip_names:
            print("""<item not printed -- in the "don't print" list>""")
        else:
            # Truncate long values so each element stays on one line
            repr_value = repr(data_element.value)
            if len(repr_value) > 50:
                repr_value = repr_value[:50] + "..."
            print("{0:s} {1:s} = {2:s}".format(pad, data_element.name,
                                               repr_value))
if __name__ == "__main__":
    import dicom
    import sys
    usage = """Usage: myprint filename"""
    # exactly one argument expected: the DICOM file to print
    if len(sys.argv) != 2:
        print(usage)
        sys.exit()
    ds = dicom.read_file(sys.argv[1])
    myprint(ds)
| Python |
# dicomtree.py
"""Show a dicom file using a hierarchical tree in a graphical window"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
usage = "Usage: python dicomtree.py dicom_filename"
from dicom.valuerep import PersonNameUnicode
import Tix
def RunTree(w, filename):
    """Build the tree widget and button box inside *w*, then load *filename*."""
    frame = Tix.Frame(w, relief=Tix.RAISED, bd=1)
    tree = Tix.Tree(frame, options="hlist.columns 2")
    tree.pack(expand=1, fill=Tix.BOTH, padx=10, pady=10, side=Tix.LEFT)
    # print(tree.hlist.keys())  # use to see the available configure() options
    tree.hlist.configure(bg='white', font='Courier 10', indent=30)
    tree.hlist.configure(selectbackground='light yellow', gap=150)
    buttons = Tix.ButtonBox(w, orientation=Tix.HORIZONTAL)
    # buttons.add('ok', text='Ok', underline=0, command=w.destroy, width=6)
    buttons.add('exit', text='Exit', underline=0, command=w.destroy, width=6)
    buttons.pack(side=Tix.BOTTOM, fill=Tix.X)
    frame.pack(side=Tix.TOP, fill=Tix.BOTH, expand=1)
    show_file(filename, tree)
def show_file(filename, tree):
    """Read *filename* as DICOM and populate the tree widget from it."""
    tree.hlist.add("root", text=filename)
    # Fix: read the file passed in, not sys.argv[1] -- the parameter was
    # ignored, so callers could never display any file other than the one
    # named on the command line.
    ds = dicom.read_file(filename)
    ds.decode()  # change strings to unicode
    recurse_tree(tree, ds, "root", False)
    tree.autosetmode()
def recurse_tree(tree, dataset, parent, hide=False):
    """Add one hlist node per data element, recursing into sequence items."""
    for data_element in dataset:
        # id() of the element gives a unique, stable node key under parent
        node_id = parent + "." + hex(id(data_element))
        if isinstance(data_element.value, unicode):
            node_text = unicode(data_element)
        else:
            node_text = str(data_element)
        tree.hlist.add(node_id, text=node_text)
        if hide:
            tree.hlist.hide_entry(node_id)
        if data_element.VR == "SQ":  # a sequence
            for i, seq_dataset in enumerate(data_element.value):
                item_id = node_id + "." + str(i + 1)
                sq_item_description = data_element.name.replace(" Sequence", "")  # XXX not i18n
                item_text = "{0:s} {1:d}".format(sq_item_description, i + 1)
                tree.hlist.add(item_id, text=item_text)
                # items start hidden; expanding the sequence node reveals them
                tree.hlist.hide_entry(item_id)
                recurse_tree(tree, seq_dataset, item_id, hide=True)
if __name__ == '__main__':
    import sys
    import dicom
    # exactly one argument expected: the DICOM file to display
    if len(sys.argv) != 2:
        print("Please supply a dicom file name:\n")
        print(usage)
        sys.exit(-1)
    root = Tix.Tk()
    root.geometry("{0:d}x{1:d}+{2:d}+{3:d}".format(800, 600, 0, 0))
    RunTree(root, sys.argv[1])
    root.mainloop()
| Python |
# __init__.py
# Mark the folder as a python package
| Python |
# DicomDiff.py
"""Show the difference between two dicom files.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
usage = """
Usage:
python DicomDiff.py file1 file2
Results printed in python difflib form - indicated by start of each line:
' ' blank means lines the same
'-' means in file1 but "removed" in file2
'+' means not in file1, but "added" in file2
('?' lines from difflib removed - no use here)
"""
import sys
import dicom
import difflib
# only used as a script
if len(sys.argv) != 3:
    print(usage)
    sys.exit()
datasets = dicom.read_file(sys.argv[1]), \
    dicom.read_file(sys.argv[2])
# difflib compare functions require a list of lines, each terminated with a
# newline character; massage the string representation of each dicom dataset
# into this form:
rep = []
for dataset in datasets:
    lines = str(dataset).split("\n")
    lines = [line + "\n" for line in lines]  # add the newline to end
    rep.append(lines)
diff = difflib.Differ()
for line in diff.compare(rep[0], rep[1]):
    # drop difflib's '?' hint lines -- no use here
    if line[0] != "?":
        print(line)
| Python |
# write_new.py
"""Simple example of writing a DICOM file from scratch using pydicom.
This example does not produce a DICOM standards compliant file as written,
you will have to change UIDs to valid values and add all required DICOM data
elements
"""
# Copyright (c) 2010-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
import sys
import os.path
import dicom
from dicom.dataset import Dataset, FileDataset
import dicom.UID
if __name__ == "__main__":
    print("---------------------------- ")
    print("write_new.py example program")
    print("----------------------------")
    print("Demonstration of writing a DICOM file using pydicom")
    print("NOTE: this is only a demo. Writing a DICOM standards compliant file")
    print("would require official UIDs, and checking the DICOM standard to ensure")
    print("that all required data elements were present.")
    print()
    # Pick platform-appropriate output paths
    if sys.platform.lower().startswith("win"):
        filename = r"c:\temp\test.dcm"
        filename2 = r"c:\temp\test-explBig.dcm"
    else:
        homedir = os.path.expanduser("~")
        filename = os.path.join(homedir, "test.dcm")
        filename2 = os.path.join(homedir, "test-explBig.dcm")
    print("Setting file meta information...")
    # Populate required values for file meta information
    file_meta = Dataset()
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage
    file_meta.MediaStorageSOPInstanceUID = "1.2.3"  # !! Need valid UID here for real work
    file_meta.ImplementationClassUID = "1.2.3.4"  # !!! Need valid UIDs here
    print("Setting dataset values...")
    # Create the FileDataset instance (initially no data elements, but file_meta supplied)
    ds = FileDataset(filename, {}, file_meta=file_meta, preamble="\0" * 128)
    # Add the data elements -- not trying to set all required here. Check DICOM standard
    ds.PatientName = "Test^Firstname"
    ds.PatientID = "123456"
    # Set the transfer syntax: implicit VR little endian first
    ds.is_little_endian = True
    ds.is_implicit_VR = True
    print("Writing test file", filename)
    ds.save_as(filename)
    print("File saved.")
    # Write the same dataset again as explicit VR big endian
    ds.file_meta.TransferSyntaxUID = dicom.UID.ExplicitVRBigEndian  # XXX shouldn't need this but pydicom 0.9.5 bug not recognizing transfer syntax
    ds.is_little_endian = False
    ds.is_implicit_VR = False
    print("Writing test file as Big Endian Explicit VR", filename2)
    ds.save_as(filename2)
| Python |
# hexutil.py
"""Miscellaneous utility routines relating to hex and byte strings"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from binascii import a2b_hex, b2a_hex
from dicom import in_py3
from dicom.charset import default_encoding
def hex2bytes(hexstring):
    """Return bytestring for a string of hex bytes separated by whitespace

    This is useful for creating specific byte sequences for testing, using
    python's implied concatenation for strings with comments allowed.

    Example:
        hex_string = (
         "08 00 32 10"    # (0008, 1032) SQ "Procedure Code Sequence"
         " 08 00 00 00"   # length 8
         " fe ff 00 e0"   # (fffe, e000) Item Tag
        )
        byte_string = hex2bytes(hex_string)

    Note in the example that all lines except the first must start with a
    space, alternatively the space could end the previous line.
    """
    # Strip the separating blanks first, then unhexlify. A str input is
    # encoded to bytes before conversion; on 2.x str already is bytes, so
    # the first branch is taken there and the behavior is identical.
    if isinstance(hexstring, bytes):
        packed = hexstring.replace(b" ", b"")
    else:
        packed = bytes(hexstring.replace(" ", ""), default_encoding)
    return a2b_hex(packed)
def bytes2hex(byte_string):
    """Return a text string of space-separated hex pairs for *byte_string*.

    Example: bytes2hex(b"\\x08\\x00") -> "08 00"

    Fix: decide whether to decode based on the actual type b2a_hex
    returned, instead of importing the ``in_py3`` flag from the dicom
    package. Behavior is identical on 2.x (str is bytes, no decode) and
    3.x (bytes decoded to str), but the module-level dependency is gone.
    """
    s = b2a_hex(byte_string)
    if not isinstance(s, str):
        s = s.decode()
    # Regroup the plain hex digest into two-character pairs.
    return " ".join(s[i:i + 2] for i in range(0, len(s), 2))
| Python |
# __init__.py
| Python |
# dump.py
"""Utility functions used in debugging writing and reading"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
from io import BytesIO
def print_character(ordchr):
    """Return a printable character, or '.' for non-printable ones."""
    # Printable ASCII range, excluding backslash (92) which would be
    # confusing in a hex dump.
    is_printable = 31 < ordchr < 126 and ordchr != 92
    return chr(ordchr) if is_printable else '.'
def filedump(filename, start_address=0, stop_address=None):
    """Dump out the contents of a file to a standard hex dump 16 bytes wide

    filename -- path of the file to dump
    start_address, stop_address -- byte range passed through to hexdump()

    Fixes: the Python-2-only ``file()`` builtin is replaced with open(),
    and the handle is closed via a context manager instead of leaking.
    hexdump() fully consumes the stream before returning, so closing
    afterwards does not change the result.
    """
    with open(filename, 'rb') as fp:
        return hexdump(fp, start_address, stop_address)
def datadump(data):
    """Print a hex dump of the byte string *data* to stdout."""
    end = len(data) + 1
    stream = BytesIO(data)
    print(hexdump(stream, 0, end))
def hexdump(file_in, start_address=0, stop_address=None, showAddress=True):
    """Return a formatted string of hex bytes and characters in data.

    This is a utility function for debugging file writing.

    file_in -- a binary-mode file-like object to get the bytes to show
    start_address -- offset at which to start reading
    stop_address -- if set, stop once the read position passes it
    showAddress -- prefix each row with its offset in hex

    Fixes relative to the original:
    - accumulate text in a StringIO; the original wrote str objects into
      a BytesIO, which raises TypeError on Python 3
    - iterating bytes yields ints on Python 3; ord() is only applied when
      the items are 1-char strings (Python 2)
    - read each row before emitting its address, so no stray address-only
      fragment is appended at end of file
    """
    from io import StringIO  # local: module top imports only BytesIO
    str_out = StringIO()
    byteslen = 16 * 3 - 1  # space taken up if row has a full 16 bytes
    blanks = ' ' * byteslen
    file_in.seek(start_address)
    while True:
        if stop_address and file_in.tell() > stop_address:
            break
        address = file_in.tell()
        data = file_in.read(16)
        if not data:
            break
        if showAddress:
            str_out.write("%04x : " % address)  # address at start of line
        row = [x if isinstance(x, int) else ord(x) for x in data]
        byte_string = ' '.join(["%02x" % x for x in row])  # two digit hex bytes
        str_out.write(byte_string)
        str_out.write(blanks[:byteslen - len(byte_string)])  # if not 16, pad
        str_out.write(' ')
        str_out.write(''.join([print_character(x) for x in row]))  # char rep
        str_out.write("\n")
    return str_out.getvalue()
def pretty_print(ds, indent=0, indent_chars=" "):
    """Print a dataset directly, with indented levels.

    This is just like Dataset._pretty_str, but more useful for debugging:
    each item is printed immediately rather than composed into one string,
    so when processing a dataset fails you can see exactly where it stopped.
    """
    prefix = indent_chars * indent
    child_prefix = indent_chars * (indent + 1)
    for elem in ds:
        if elem.VR != "SQ":
            print(prefix + repr(elem))
            continue
        # Sequence element: print a header line, then recurse into items.
        header = "{0:s}{1:s} {2:s} {3:d} item(s) ---".format(
            prefix, str(elem.tag), elem.name, len(elem.value))
        print(header)
        for item in elem.value:
            pretty_print(item, indent + 1)
            print(child_prefix + "---------")
if __name__ == "__main__":
import sys
filename = sys.argv[1]
start_address = 0
stop_address = None
if len(sys.argv) > 2: # then have start address
start_address = eval(sys.argv[2])
if len(sys.argv) > 3:
stop_address = eval(sys.argv[3])
print(filedump(filename, start_address, stop_address))
| Python |
# charset.py
"""Handle alternate character sets for character strings."""
#
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
import logging
logger = logging.getLogger('pydicom')
from dicom.valuerep import PersonNameUnicode, text_VRs
# Map DICOM Specific Character Set (0008,0005) values to Python codec names.
python_encoding = {
    '': 'iso8859',            # default character set for DICOM
    'ISO_IR 6': 'iso8859',    # alias for latin_1 too
    'ISO_IR 100': 'latin_1',
    'ISO 2022 IR 87': 'iso2022_jp',
    'ISO 2022 IR 13': 'shift_jis',
    'ISO 2022 IR 149': 'euc_kr',  # needs cleanup via clean_escseq()
    'ISO_IR 192': 'UTF8',  # from Chinese example, 2008 PS3.5 Annex J p1-4
    'GB18030': 'GB18030',
    'ISO_IR 126': 'iso_ir_126',  # Greek
    'ISO_IR 127': 'iso_ir_127',  # Arab
    'ISO_IR 138': 'iso_ir_138',  # Hebrew
    'ISO_IR 144': 'iso_ir_144',  # Russian
}
# Encoding used when no Specific Character Set is given (i.e. ISO_IR 6).
default_encoding = "iso8859"
def clean_escseq(element, encodings):
    """Remove escape sequences that Python does not remove from
    Korean encoding ISO 2022 IR 149 due to the G1 code element.
    """
    if 'euc_kr' not in encodings:
        return element
    # Strip the G1-designation and return-to-ASCII escape sequences.
    for sequence in ("\x1b\x24\x29\x43", "\x1b\x28\x42"):
        element = element.replace(sequence, "")
    return element
# DICOM PS3.5-2008 6.1.1 (p 18) says:
# default is ISO-IR 6 G0, equiv to common chr set of ISO 8859 (PS3.5 6.1.2.1)
# (0008,0005) value 1 can *replace* the default encoding...
# for VRs of SH, LO, ST, LT, PN and UT (PS3.5 6.1.2.3)...
# with a single-byte character encoding
# if (0008,0005) is multi-valued, then value 1 (or default if blank)...
# is used until code extension escape sequence is hit,
# which can be at start of string, or after CR/LF, FF, or
# in Person Name PN, after ^ or =
# NOTE also that 7.5.3 SEQUENCE INHERITANCE states that if (0008,0005)
# is not present in a sequence item then it is inherited from its parent.
def decode(data_element, dicom_character_set):
    """Apply the DICOM character encoding to the data element (in place).

    data_element -- DataElement instance containing a value to convert
    dicom_character_set -- the value of Specific Character Set (0008,0005),
                    which may be a single value,
                    a multiple value (code extension), or
                    may also be '' or None.
                    If blank or None, ISO_IR 6 is used.
    """
    if not dicom_character_set:
        dicom_character_set = ['ISO_IR 6']
    # Normalize the character set(s) to a list of names.
    have_character_set_list = True
    try:
        dicom_character_set.append  # check if is list-like object
    except AttributeError:
        have_character_set_list = False
    if have_character_set_list:
        if not dicom_character_set[0]:
            # A blank first value means the default repertoire.
            dicom_character_set[0] = "ISO_IR 6"
    else:
        dicom_character_set = [dicom_character_set]
    encodings = [python_encoding[x] for x in dicom_character_set]
    # Pad to three encodings (one per person-name component group).
    if len(encodings) == 1:
        encodings = [encodings[0]] * 3
    if len(encodings) == 2:
        encodings.append(encodings[1])
    # decode the string value to unicode
    # PN is special case as may have 3 components with different chr sets
    if data_element.VR == "PN":
        # logger.warn("%s ... type: %s" %(str(data_element), type(data_element.VR)))
        if data_element.VM == 1:
            data_element.value = PersonNameUnicode(data_element.value, encodings)
        else:
            data_element.value = [PersonNameUnicode(value, encodings)
                                  for value in data_element.value]
    if data_element.VR in text_VRs:
        # Remove the first encoding if this is a multi-byte encoding
        if len(encodings) > 1:
            del encodings[0]
        if data_element.VM == 1:
            data_element.value = clean_escseq(
                data_element.value.decode(
                    encodings[0]), encodings)
        else:
            data_element.value = [clean_escseq(
                value.decode(encodings[0]), encodings)
                for value in data_element.value]
| Python |
#pydicom_Tkinter.py
#
# Copyright (c) 2009 Daniel Nanz
# This file is released under the pydicom (http://code.google.com/p/pydicom/)
# license, see the file license.txt available at
# (http://code.google.com/p/pydicom/)
#
# revision history:
# Dec-08-2009: version 0.1
#
# 0.1: tested with pydicom version 0.9.3, Python version 2.6.2 (32-bit)
# under Windows XP Professional 2002, and Mac OS X 10.5.5,
# using numpy 1.3.0 and a small random selection of MRI and
# CT images.
'''
View DICOM images from pydicom
requires numpy: http://numpy.scipy.org/
Usage:
------
>>> import dicom # pydicom
>>> import dicom.contrib.pydicom_Tkinter as pydicom_Tkinter # this module
>>> df = dicom.read_file(filename)
>>> pydicom_Tkinter.show_image(df)
'''
import Tkinter
import tempfile
import os
# Flag whether numpy is importable; image handling below requires it.
have_numpy = True
try:
    import numpy as np
except ImportError:
    # Fix: catch only ImportError instead of a bare except, so unrelated
    # errors during import are not silently swallowed.
    # will not work...
    have_numpy = False
def get_PGM_bytedata_string(arr):
    '''Given a 2D numpy array as input write gray-value image data in the PGM
    format into a byte string and return it.

    arr: single-byte unsigned int numpy array
    note: Tkinter's PhotoImage object seems to accept only single-byte data

    Raises ValueError if arr is not 2D uint8.

    Fixes: reversed() over a bare map() fails on Python 3 (map returns an
    iterator); joining str header lines with the raw pixel bytes fails on
    Python 3; arr.tostring() is removed in modern numpy (use tobytes()).
    The result is now consistently a byte string on both Python versions.
    '''
    if arr.dtype != np.uint8:
        raise ValueError
    if len(arr.shape) != 2:
        raise ValueError
    # array.shape is (#rows, #cols) tuple; PGM input needs this reversed
    col_row_string = ' '.join(str(dim) for dim in reversed(arr.shape))
    # PGM (P5) layout: magic, "<cols> <rows>", maxval, raw pixel bytes.
    bytedata_string = b'\n'.join((b'P5',
                                  col_row_string.encode('ascii'),
                                  str(arr.max()).encode('ascii'),
                                  arr.tobytes()))
    return bytedata_string
def get_PGM_from_numpy_arr(arr, window_center, window_width,
                           lut_min=0, lut_max=255):
    '''real-valued numpy input -> PGM-image formatted byte string

    arr: real-valued numpy array to display as grayscale image
    window_center, window_width: to define max/min values to be mapped to the
                                 lookup-table range. WC/WW scaling is done
                                 according to DICOM-3 specifications.
    lut_min, lut_max: min/max values of (PGM-) grayscale table: do not change

    NOTE(review): if *arr* is already float64 the windowing below modifies
    it in place -- a caller passing float64 data sees its array changed.
    '''
    if np.isreal(arr).sum() != arr.size:
        raise ValueError
    # currently only support 8-bit colors
    if lut_max != 255:
        raise ValueError
    if arr.dtype != np.float64:
        arr = arr.astype(np.float64)
    # LUT-specific array scaling
    # width >= 1 (DICOM standard)
    window_width = max(1, window_width)
    wc, ww = np.float64(window_center), np.float64(window_width)
    lut_range = np.float64(lut_max) - lut_min
    # Window edges of the linear VOI LUT transform.
    minval = wc - 0.5 - (ww - 1.0) / 2.0
    maxval = wc - 0.5 + (ww - 1.0) / 2.0
    # Partition pixels into below-window, in-window, above-window.
    min_mask = (minval >= arr)
    to_scale = (arr > minval) & (arr < maxval)
    max_mask = (arr >= maxval)
    if min_mask.any(): arr[min_mask] = lut_min
    if to_scale.any(): arr[to_scale] = ((arr[to_scale] - (wc - 0.5)) /
                                        (ww - 1.0) + 0.5) * lut_range + lut_min
    if max_mask.any(): arr[max_mask] = lut_max
    # round to next integer values and convert to unsigned int
    arr = np.rint(arr).astype(np.uint8)
    # return PGM byte-data string
    return get_PGM_bytedata_string(arr)
def get_tkinter_photoimage_from_pydicom_image(data):
    '''
    Wrap data.pixel_array in a Tkinter PhotoImage instance,
    after conversion into a PGM grayscale image.

    This will fail if the "numpy" module is not installed in the attempt of
    creating the data.pixel_array.

    data: object returned from pydicom.read_file()
    side effect: may leave a temporary .pgm file on disk
    '''
    # get numpy array as representation of image data
    arr = data.pixel_array.astype(np.float64)
    # pixel_array seems to be the original, non-rescaled array.
    # If present, window center and width refer to rescaled array
    # -> do rescaling if possible.
    if ('RescaleIntercept' in data) and ('RescaleSlope' in data):
        intercept = data.RescaleIntercept  # single value
        slope = data.RescaleSlope
        arr = slope * arr + intercept
    # get default window_center and window_width values from the data range
    wc = (arr.max() + arr.min()) / 2.0
    ww = arr.max() - arr.min() + 1.0
    # overwrite with specific values from data, if available
    if ('WindowCenter' in data) and ('WindowWidth' in data):
        wc = data.WindowCenter
        ww = data.WindowWidth
        # WindowCenter/Width can be multi-valued; use the first entry.
        try:
            wc = wc[0]  # can be multiple values
        except:
            pass
        try:
            ww = ww[0]
        except:
            pass
    # scale array to account for center, width and PGM grayscale range,
    # and wrap into PGM formatted ((byte-) string
    pgm = get_PGM_from_numpy_arr(arr, wc, ww)
    # create a PhotoImage
    # for as yet unidentified reasons the following fails for certain
    # window center/width values:
    # photo_image = Tkinter.PhotoImage(data=pgm, gamma=1.0)
    # Error with Python 2.6.2 under Windows XP:
    # (self.tk.call(('image', 'create', imgtype, name,) + options)
    # _tkinter.TclError: truncated PPM data
    # OsX: distorted images
    # while all seems perfectly OK for other values of center/width or when
    # the PGM is first written to a temporary file and read again
    # write PGM file into temp dir
    (os_id, abs_path) = tempfile.mkstemp(suffix='.pgm')
    with open(abs_path, 'wb') as fd:
        fd.write(pgm)
    photo_image = Tkinter.PhotoImage(file=abs_path, gamma=1.0)
    # close and remove temporary file on disk
    # os.close is needed under windows for os.remove not to fail
    try:
        os.close(os_id)
        os.remove(abs_path)
    except:
        pass  # silently leave file on disk in temp-like directory
    return photo_image
def show_image(data, block=True, master=None):
    '''
    Get minimal Tkinter GUI and display a pydicom data.pixel_array

    data: object returned from pydicom.read_file()
    block: if True run Tk mainloop() to show the image
    master: use with block==False and an existing Tk widget as parent widget

    side effects: may leave a temporary .pgm file on disk
    '''
    frame = Tkinter.Frame(master=master, background='#000')
    # Build a window title from series metadata when available.
    have_meta = 'SeriesDescription' in data and 'InstanceNumber' in data
    if have_meta:
        parts = ('Ser: ' + data.SeriesDescription,
                 'Img: ' + str(data.InstanceNumber))
        title = ', '.join(parts)
    else:
        title = 'pydicom image'
    frame.master.title(title)
    photo_image = get_tkinter_photoimage_from_pydicom_image(data)
    label = Tkinter.Label(frame, image=photo_image, background='#000')
    # keep a reference to avoid disappearance upon garbage collection
    label.photo_reference = photo_image
    label.grid()
    frame.grid()
    if block == True:
        frame.mainloop()
| Python |
# dicom_series.py
"""
By calling the function read_files with a directory name or list
of files as an argument, a list of DicomSeries instances can be
obtained. A DicomSeries object has some attributes that give
information about the serie (such as shape, sampling, suid) and
has an info attribute, which is a dicom.DataSet instance containing
information about the first dicom file in the serie. The data can
be obtained using the get_pixel_array() method, which produces a
3D numpy array if there a multiple files in the serie.
This module can deal with gated data, in which case a DicomSeries
instance is created for each 3D volume.
"""
#
# Copyright (c) 2010 Almar Klein
# This file is released under the pydicom license.
# See the file license.txt included with the pydicom distribution, also
# available at http://pydicom.googlecode.com
#
# I (Almar) performed some test to loading a series of data
# in two different ways: loading all data, and deferring loading
# the data. Both ways seem equally fast on my system. I have to
# note that results can differ quite a lot depending on the system,
# but still I think this suggests that deferred reading is in
# general not slower. I think deferred loading of the pixel data
# can be advantageous because maybe not all data of all series
# is needed. Also it simply saves memory, because the data is
# removed from the Dataset instances.
# In the few result below, cold means reading for the first time,
# warm means reading 2nd/3d/etc time.
# - Full loading of data, cold: 9 sec
# - Full loading of data, warm: 3 sec
# - Deferred loading of data, cold: 9 sec
# - Deferred loading of data, warm: 3 sec
import os, sys, time, gc
import dicom
from dicom.sequence import Sequence
# Try importing numpy
try:
import numpy as np
have_numpy = True
except Exception:
np = None
have_numpy = False
## Helper functions and classes
class ProgressBar:
    """ Prints progress to the screen as a growing bar of characters.
    """

    def __init__(self, char='-', length=20):
        self.char = char        # character the bar is drawn with
        self.length = length    # total bar width in characters
        self.progress = 0.0     # last reported progress fraction
        self.nbits = 0          # number of bar characters printed so far
        self.what = ''          # label shown before the bar

    def Start(self, what=''):
        """ Start(what='')
        Start the progress bar, displaying the given text first.
        Make sure not to print anything until after calling
        Finish(). Messages can be printed while displaying
        progress by using PrintMessage().
        """
        self.what = what
        self.progress = 0.0
        self.nbits = 0
        sys.stdout.write(what + " [")

    def Stop(self, message=""):
        """ Stop the progress bar where it is now.
        Optionally print a message behind it."""
        remaining = int(self.length - self.nbits)
        sys.stdout.write(" " * remaining + "] " + message + "\n")

    def Finish(self, message=""):
        """ Finish the progress bar, setting it to 100% if it
        was not already. Optionally print a message behind the bar.
        """
        remaining = int(self.length - self.nbits)
        sys.stdout.write(self.char * remaining + "] " + message + "\n")

    def Update(self, newProgress):
        """ Update progress. Progress is given as a number
        between 0 and 1.
        """
        self.progress = newProgress
        target = self.length * newProgress
        delta = int(target - self.nbits)
        if delta > 0:
            sys.stdout.write(self.char * delta)
            self.nbits += delta

    def PrintMessage(self, message):
        """ Print a message (for example a warning) behind the
        progress bar, then start a fresh bar with the same label.
        """
        self.Stop(message)
        self.Start(self.what)
def _dummyProgressCallback(progress):
    """ Progress callback that ignores its argument; used when no
    progress display is wanted. """
    return None
# Shared bar instance used by the default progress callback below.
_progressBar = ProgressBar()
def _progressCallback(progress):
    """ The default callback for displaying progress.

    progress -- a string to start a new stage, None to finish it,
                or a float in [0, 1] to update the bar.
    """
    # NOTE(review): `basestring` is Python-2-only; under Python 3 this
    # would need `str` -- confirm the targeted Python version.
    if isinstance(progress, basestring):
        _progressBar.Start(progress)
        # Remember the stage start time so Finish can report elapsed time.
        _progressBar._t0 = time.time()
    elif progress is None:
        dt = time.time() - _progressBar._t0
        _progressBar.Finish('%2.2f seconds' % dt)
    else:
        _progressBar.Update(progress)
def _listFiles(files, path):
    """Append every file found below *path* (recursively) to *files*."""
    for name in os.listdir(path):
        entry = os.path.join(path, name)
        if os.path.isdir(entry):
            _listFiles(files, entry)
        else:
            files.append(entry)
def _splitSerieIfRequired(serie, series):
    """ _splitSerieIfRequired(serie, series)
    Split the serie in multiple series if this is required.
    The choice is based on examining the image position relative to
    the previous image. If it differs too much, it is assumed
    that there is a new dataset. This can happen for example in
    unsplit gated CT data.
    """
    # Sort the original list and get local name
    serie._sort()
    L = serie._datasets
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this
    if not "ImagePositionPatient" in ds1:
        return
    # Initialize a list of new lists
    L2 = [[ds1]]
    # Init slice distance estimate
    distance = 0
    for index in range(1,len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions (z component of Image Position (Patient))
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distances
        newDist = abs(pos1 - pos2)
        #deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1*distance:
            L2.append([])
            distance = 0
        else:
            # Test missing file (gap larger than expected, but not a split)
            if distance and newDist > 1.5*distance:
                print 'Warning: missing file after "%s"' % ds1.filename
            distance = newDist
        # Add to last list
        L2[-1].append( ds2 )
        # Store previous
        ds1 = ds2
    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, serie._showProgress)
            newSerie._datasets = Sequence(L)
            series2insert.append(newSerie)
        # Insert series and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
# Tag of the Pixel Data element (7FE0,0010).
pixelDataTag = dicom.tag.Tag(0x7fe0, 0x0010)
def _getPixelDataFromDataset(ds):
    """ Get the pixel data from the given dataset. If the data
    was deferred, make it deferred again, so that memory is
    preserved. Also applies RescaleSlope and RescaleIntercept
    if available. """
    # Get original element
    el = dict.__getitem__(ds, pixelDataTag)
    # Get data
    data = ds.pixel_array
    # Remove data (mark as deferred)
    # Putting the original element back and deleting the cached array
    # keeps the dataset small after we have taken our copy.
    dict.__setitem__(ds, pixelDataTag, el)
    del ds._pixel_array
    # Obtain slope and offset
    slope = 1
    offset = 0
    needFloats = False
    needApplySlopeOffset = False
    if 'RescaleSlope' in ds:
        needApplySlopeOffset = True
        slope = ds.RescaleSlope
    if 'RescaleIntercept' in ds:
        needApplySlopeOffset = True
        offset = ds.RescaleIntercept
    # Non-integral slope/offset forces float output.
    if int(slope)!= slope or int(offset) != offset:
        needFloats = True
    if not needFloats:
        slope, offset = int(slope), int(offset)
    # Apply slope and offset
    if needApplySlopeOffset:
        # Maybe we need to change the datatype?
        if data.dtype in [np.float32, np.float64]:
            pass
        elif needFloats:
            data = data.astype(np.float32)
        else:
            # Determine required range after rescaling
            minReq, maxReq = data.min(), data.max()
            minReq = min([minReq, minReq*slope+offset, maxReq*slope+offset])
            maxReq = max([maxReq, minReq*slope+offset, maxReq*slope+offset])
            # Determine required datatype from that
            dtype = None
            if minReq<0:
                # Signed integer type
                maxReq = max([-minReq, maxReq])
                if maxReq < 2**7:
                    dtype = np.int8
                elif maxReq < 2**15:
                    dtype = np.int16
                elif maxReq < 2**31:
                    dtype = np.int32
                else:
                    dtype = np.float32
            else:
                # Unsigned integer type
                # NOTE(review): these thresholds suggest np.uint8/16/32 was
                # intended; np.int8 etc. cannot hold the full unsigned
                # range -- confirm before changing.
                if maxReq < 2**8:
                    dtype = np.int8
                elif maxReq < 2**16:
                    dtype = np.int16
                elif maxReq < 2**32:
                    dtype = np.int32
                else:
                    dtype = np.float32
            # Change datatype
            if dtype != data.dtype:
                data = data.astype(dtype)
        # Apply slope and offset (in place)
        data *= slope
        data += offset
    # Done
    return data
## The public functions and classes
def read_files(path, showProgress=False, readPixelData=False):
    """ read_files(path, showProgress=False, readPixelData=False)
    Reads dicom files and returns a list of DicomSeries objects, which
    contain information about the data, and can be used to load the
    image or volume data.

    The parameter "path" can also be a list of files or directories.

    If the callable "showProgress" is given, it is called with a single
    argument to indicate the progress. The argument is a string when a
    progress is started (indicating what is processed). A float indicates
    progress updates. The parameter is None when the progress is finished.
    When "showProgress" is True, a default callback is used that writes
    to stdout. By default, no progress is shown.

    if readPixelData is True, the pixel data of all series is read. By
    default the loading of pixeldata is deferred until it is requested
    using the DicomSeries.get_pixel_array() method. In general, both
    methods should be equally fast.
    """
    # Init list of files
    files = []
    # Obtain data from the given path
    if isinstance(path, basestring):
        # Make dir nice
        basedir = os.path.abspath(path)
        # Check whether it exists
        if not os.path.isdir(basedir):
            raise ValueError('The given path is not a valid directory.')
        # Find files recursively
        _listFiles(files, basedir)
    elif isinstance(path, (tuple, list)):
        # Iterate over all elements, which can be files or directories
        for p in path:
            if os.path.isdir(p):
                _listFiles(files, os.path.abspath(p))
            elif os.path.isfile(p):
                files.append(p)
            else:
                print "Warning, the path '%s' is not valid." % p
    else:
        raise ValueError('The path argument must be a string or list.')
    # Set default progress callback?
    if showProgress is True:
        showProgress = _progressCallback
    if not hasattr(showProgress, '__call__'):
        showProgress = _dummyProgressCallback
    # Set defer size: elements larger than this are loaded lazily.
    deferSize = 16383  # 128**2-1
    if readPixelData:
        deferSize = None
    # Gather file data and put in DicomSeries
    series = {}
    count = 0
    showProgress('Loading series information:')
    for filename in files:
        # Skip DICOMDIR files
        if filename.count("DICOMDIR"):
            continue
        # Try loading dicom ...
        try:
            dcm = dicom.read_file( filename, deferSize )
        except dicom.filereader.InvalidDicomError:
            continue  # skip non-dicom file
        except Exception as why:
            # Report the problem but keep scanning the remaining files.
            if showProgress is _progressCallback:
                _progressBar.PrintMessage(str(why))
            else:
                print 'Warning:', why
            continue
        # Get SUID and register the file with an existing or new series object
        try:
            suid = dcm.SeriesInstanceUID
        except AttributeError:
            continue  # some other kind of dicom file
        if suid not in series:
            series[suid] = DicomSeries(suid, showProgress)
        series[suid]._append(dcm)
        # Show progress (note that we always start with a 0.0)
        showProgress( float(count) / len(files) )
        count += 1
    # Finish progress
    showProgress( None )
    # Make a list and sort, so that the order is deterministic
    series = series.values()
    series.sort(key=lambda x:x.suid)
    # Split series if necessary (iterate over a copy: the list is mutated)
    for serie in reversed([serie for serie in series]):
        _splitSerieIfRequired(serie, series)
    # Finish all series
    showProgress('Analysing series')
    series_ = []
    for i in range(len(series)):
        try:
            series[i]._finish()
            series_.append(series[i])
        except Exception:
            pass  # Skip serie (probably report-like file without pixels)
        showProgress(float(i+1)/len(series))
    showProgress(None)
    return series_
class DicomSeries(object):
    """ DicomSeries
    This class represents a serie of dicom files that belong together.
    If these are multiple files, they represent the slices of a volume
    (like for CT or MRI). The actual volume can be obtained using loadData().
    Information about the data can be obtained using the info attribute.
    """

    # To create a DicomSeries object, start by making an instance and
    # append files using the "_append" method. When all files are
    # added, call "_sort" to sort the files, and then "_finish" to evaluate
    # the data, perform some checks, and set the shape and sampling
    # attributes of the instance.

    def __init__(self, suid, showProgress):
        # Init dataset list and the callback
        self._datasets = Sequence()
        self._showProgress = showProgress
        # Init props
        self._suid = suid
        self._info = None
        self._shape = None
        self._sampling = None

    @property
    def suid(self):
        """ The Series Instance UID. """
        return self._suid

    @property
    def shape(self):
        """ The shape of the data (nz, ny, nx).
        If None, the serie contains a single dicom file. """
        return self._shape

    @property
    def sampling(self):
        """ The sampling (voxel distances) of the data (dz, dy, dx).
        If None, the serie contains a single dicom file. """
        return self._sampling

    @property
    def info(self):
        """ A DataSet instance containing the information as present in the
        first dicomfile of this serie. """
        return self._info

    @property
    def description(self):
        """ A description of the dicom series. Used fields are
        PatientName, shape of the data, SeriesDescription, and
        ImageComments.
        """
        info = self.info
        # If no info available, return simple description
        if info is None:
            return "DicomSeries containing %i images" % len(self._datasets)
        fields = []
        # Give patient name
        if 'PatientName' in info:
            fields.append(""+info.PatientName)
        # Also add dimensions
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append( 'x'.join(tmp) )
        # Try adding more fields
        if 'SeriesDescription' in info:
            fields.append("'"+info.SeriesDescription+"'")
        if 'ImageComments' in info:
            fields.append("'"+info.ImageComments+"'")
        # Combine
        return ' '.join(fields)

    def __repr__(self):
        adr = hex(id(self)).upper()
        return "<DicomSeries with %i images at %s>" % (len(self._datasets), adr)

    def get_pixel_array(self):
        """ get_pixel_array()
        Get (load) the data that this DicomSeries represents, and return
        it as a numpy array. If this serie contains multiple images, the
        resulting array is 3D, otherwise it's 2D.
        If RescaleSlope and RescaleIntercept are present in the dicom info,
        the data is rescaled using these parameters. The data type is chosen
        depending on the range of the (rescaled) data.
        """
        # Can we do this?
        if not have_numpy:
            msg = "The Numpy package is required to use get_pixel_array.\n"
            raise ImportError(msg)
        # It's easy if no file or if just a single file
        if len(self._datasets)==0:
            raise ValueError('Serie does not contain any files.')
        elif len(self._datasets)==1:
            ds = self._datasets[0]
            slice = _getPixelDataFromDataset( ds )
            return slice
        # Check info
        if self.info is None:
            raise RuntimeError("Cannot return volume if series not finished.")
        # Set callback to update progress
        showProgress = self._showProgress
        # Init data (using what the dicom packaged produces as a reference)
        ds = self._datasets[0]
        slice = _getPixelDataFromDataset( ds )
        #vol = Aarray(self.shape, self.sampling, fill=0, dtype=slice.dtype)
        vol = np.zeros(self.shape, dtype=slice.dtype)
        vol[0] = slice
        # Fill volume slice by slice
        showProgress('Loading data:')
        ll = self.shape[0]
        for z in range(1,ll):
            ds = self._datasets[z]
            vol[z] = _getPixelDataFromDataset(ds)
            showProgress(float(z)/ll)
        # Finish
        showProgress(None)
        # Done
        gc.collect()
        return vol

    def _append(self, dcm):
        """ _append(dcm)
        Append a dicomfile (as a dicom.dataset.FileDataset) to the series.
        """
        self._datasets.append(dcm)

    def _sort(self):
        """ sort()
        Sort the datasets by instance number.
        """
        self._datasets.sort(key=lambda k: k.InstanceNumber)

    def _finish(self):
        """ _finish()
        Evaluate the series of dicom files. Together they should make up
        a volumetric dataset. This means the files should meet certain
        conditions. Also some additional information has to be calculated,
        such as the distance between the slices. This method sets the
        attributes for "shape", "sampling" and "info".

        This method checks:
        * that there are no missing files
        * that the dimensions of all images match
        * that the pixel spacing of all images match
        """
        # The datasets list should be sorted by instance number
        L = self._datasets
        if len(L)==0:
            return
        elif len(L) < 2:
            # Single-slice serie: 2D shape/sampling, info is the dataset itself
            ds = self._datasets[0]
            self._info = self._datasets[0]
            self._shape = [ds.Rows, ds.Columns]
            self._sampling = [float(ds.PixelSpacing[0]), float(ds.PixelSpacing[1])]
            return
        # Get previous
        ds1 = L[0]
        # Init measures to calculate average of
        distance_sum = 0.0
        # Init measures to check (these are in 2D)
        dimensions = ds1.Rows, ds1.Columns
        sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1]) # row, column
        for index in range(len(L)):
            # The first round ds1 and ds2 will be the same, for the
            # distance calculation this does not matter
            # Get current
            ds2 = L[index]
            # Get positions
            pos1 = float(ds1.ImagePositionPatient[2])
            pos2 = float(ds2.ImagePositionPatient[2])
            # Update distance_sum to calculate distance later
            distance_sum += abs(pos1 - pos2)
            # Test measures
            dimensions2 = ds2.Rows, ds2.Columns
            sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1])
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError('Dimensions of slices does not match.')
            if sampling != sampling2:
                # We can still produce a volume, but we should notify the user
                msg = 'Warning: sampling does not match.'
                if self._showProgress is _progressCallback:
                    _progressBar.PrintMessage(msg)
                else:
                    print msg
            # Store previous
            ds1 = ds2
        # Create new dataset by making a deep copy of the first,
        # excluding the Pixel Data element.
        info = dicom.dataset.Dataset()
        firstDs = self._datasets[0]
        for key in firstDs.keys():
            if key != (0x7fe0, 0x0010):
                el = firstDs[key]
                info.add_new(el.tag, el.VR, el.value)
        # Finish calculating average distance
        # (Note that there are len(L)-1 distances)
        distance_mean = distance_sum / (len(L)-1)
        # Store information that is specific for the serie
        self._shape = [len(L), ds2.Rows, ds2.Columns]
        self._sampling = [distance_mean, float(ds2.PixelSpacing[0]),
                          float(ds2.PixelSpacing[1])]
        # Store
        self._info = info
if __name__ == '__main__':
    # Command line: dicom_series.py <directory-with-dicom-files>
    import sys
    if len(sys.argv) != 2:
        print "Expected a single argument: a directory with dicom files in it"
    else:
        adir = sys.argv[1]
        t0 = time.time()
        # showProgress=None disables the progress bar output.
        all_series = read_files(adir, None, False)
        print "Summary of each series:"
        for series in all_series:
            print series.description
| Python |
#!/usr/bin/python
""" dicom_dao
Data Access Objects for persisting PyDicom DataSet objects.
Currently we support couchdb through the DicomCouch class.
Limitations:
- Private tags are discarded
TODO:
- Unit tests with multiple objects open at a time
- Unit tests with rtstruct objects
- Support for mongodb (mongo has more direct support for binary data)
Dependencies:
- PyDicom
- python-couchdb
- simplejson
Tested with:
- PyDicom 0.9.4-1
- python-couchdb 0.6
- couchdb 0.10.1
- simplejson 2.0.9
"""
#
# Copyright (c) 2010 Michael Wallace
# This file is released under the pydicom license.
# See the file license.txt included with the pydicom distribution, also
# available at http://pydicom.googlecode.com
#
import hashlib
import os
import string
import simplejson
import couchdb
import dicom
def uid2str(uid):
    """Return the PyDicom UID as a plain string (its repr minus quotes)."""
    text = repr(uid)
    return text.strip("'")
# When reading files a VR of 'US or SS' is left as binary, because we
# don't know how to interpret the values as numbers. We therefore
# treat it as binary and will continue to until either pydicom works it
# out for us, or we figure out a test.
BINARY_VR_VALUES = ['OW', 'OB', 'OW/OB', 'US or SS']
class DicomCouch(dict):
    """ A Data Access Object for persisting PyDicom objects into CouchDB

    We follow the same pattern as the python-couchdb library for getting and
    setting documents, for example storing dicom.dataset.Dataset object dcm:

        db = DicomCouch('http://localhost:5984/', 'dbname')
        db[dcm.SeriesInstanceUID] = dcm

    The only constraints on the key are that it must be json-serializable and
    unique within the database instance. In theory it should be possible to
    use any DICOM UID. Unfortunately I have written this code under the
    assumption that SeriesInstanceUID will always be used. This will be fixed.

    Retrieving object with key 'foo':

        dcm = db['foo']

    Deleting object with key 'foo':

        dcm = db['foo']
        db.delete(dcm)

    TODO:
     - It is possible to have couchdb assign a uid when adding objects. This
       should be supported.
    """

    def __init__(self, server, db):
        """ Create connection to couchdb server/db

        :param server: URL of the couchdb server, e.g. 'http://localhost:5984/'
        :param db: database name; it is created if it does not already exist
        """
        super(DicomCouch, self).__init__()
        # Per-series bookkeeping: _meta[uid]['doc'] holds the raw couch
        # document (for _rev/_id/_attachments), _meta[uid]['hashes'] holds
        # md5 objects of binary attachment values for change detection.
        self._meta = {}
        server = couchdb.Server(server)
        try:
            self._db = server[db]
        except couchdb.client.ResourceNotFound:
            self._db = server.create(db)

    def __getitem__(self, key):
        """ Retrieve DICOM object with specified SeriesInstanceUID """
        doc = self._db[key]
        dcm = json2pydicom(doc)
        if dcm.SeriesInstanceUID not in self._meta:
            self._meta[dcm.SeriesInstanceUID] = {}
            self._meta[dcm.SeriesInstanceUID]['hashes'] = {}
        if '_attachments' in doc:
            self.__get_attachments(dcm, doc)
        # Rebuild the is_implicit_vr/is_little_endian flags from the stored
        # transfer syntax so the dataset can be written or rendered again.
        _set_meta_info_dcm(dcm)
        # Keep a copy of the couch doc for use in DELETE operations
        self._meta[dcm.SeriesInstanceUID]['doc'] = doc
        return dcm

    def __setitem__(self, key, dcm):
        """ Write the supplied DICOM object to the database """
        try:
            dcm.PixelData = dcm.pixel_array.tostring()
        except AttributeError:
            pass  # Silently ignore errors due to pixel_array not existing
        except NotImplementedError:
            pass  # Silently ignore attempts to modify compressed pixel data
        except TypeError:
            pass  # Silently ignore errors due to PixelData not existing
        jsn, binary_elements, file_meta_binary_elements = pydicom2json(dcm)
        _strip_elements(jsn, binary_elements)
        _strip_elements(jsn['file_meta'], file_meta_binary_elements)
        if dcm.SeriesInstanceUID in self._meta:
            self.__set_meta_info_jsn(jsn, dcm)
        try:  # Actually write to the db
            self._db[key] = jsn
        except TypeError as type_error:
            # NOTE(review): this swallows the TypeError whether or not the
            # message matches (there is no else/raise), so an unrelated
            # TypeError from couchdb is silently lost too — confirm intent.
            if str(type_error) == 'string indices must be integers, not str':
                pass
        if dcm.SeriesInstanceUID not in self._meta:
            self._meta[dcm.SeriesInstanceUID] = {}
            self._meta[dcm.SeriesInstanceUID]['hashes'] = {}
        self.__put_attachments(dcm, binary_elements, jsn)
        # Get a local copy of the document
        # We get this from couch because we get the _id, _rev and _attachments
        # keys which will ensure we don't overwrite the attachments we just
        # uploaded.
        # I don't really like the extra HTTP GET and I think we can generate
        # what we need without doing it. Don't have time to work out how yet.
        self._meta[dcm.SeriesInstanceUID]['doc'] = \
            self._db[dcm.SeriesInstanceUID]

    def __str__(self):
        """ Return the string representation of the couchdb client """
        return str(self._db)

    def __repr__(self):
        """ Return the canonical string representation of the couchdb client """
        return repr(self._db)

    def __get_attachments(self, dcm, doc):
        """ Set binary tags by retrieving attachments from couchdb.

        Values are hashed so they are only updated if they have changed.
        """
        # Attachment ids are colon-joined tagstacks (see _tagstack2id).
        for id in doc['_attachments'].keys():
            tagstack = id.split(':')
            value = self._db.get_attachment(doc['_id'], id)
            _add_element(dcm, tagstack, value)
            self._meta[dcm.SeriesInstanceUID]['hashes'][id] = hashlib.md5(value)

    def __put_attachments(self, dcm, binary_elements, jsn):
        """ Upload all new and modified attachments """
        # Only elements whose md5 differs from the stored hash are uploaded.
        elements_to_update = \
            [(tagstack, item) for tagstack, item in binary_elements \
             if self.__attachment_update_needed(\
                 dcm, _tagstack2id(tagstack + [item.tag]), item)]
        for tagstack, element in elements_to_update:
            id = _tagstack2id(tagstack + [element.tag])
            self._db.put_attachment(jsn, element.value, id)
            self._meta[dcm.SeriesInstanceUID]['hashes'][id] = \
                hashlib.md5(element.value)

    def delete(self, dcm):
        """ Delete from database and remove meta info from the DAO """
        self._db.delete(self._meta[dcm.SeriesInstanceUID]['doc'])
        self._meta.pop(dcm.SeriesInstanceUID)

    def __set_meta_info_jsn(self, jsn, dcm):
        """ Set the couch-specific meta data for supplied dict """
        # _rev must match the stored revision or couch rejects the update;
        # carrying _attachments over preserves previously uploaded binaries.
        jsn['_rev'] = self._meta[dcm.SeriesInstanceUID]['doc']['_rev']
        if '_attachments' in self._meta[dcm.SeriesInstanceUID]['doc']:
            jsn['_attachments'] = \
                self._meta[dcm.SeriesInstanceUID]['doc']['_attachments']

    def __attachment_update_needed(self, dcm, id, binary_element):
        """ Compare hashes for binary element and return true if different """
        try:
            hashes = self._meta[dcm.SeriesInstanceUID]['hashes']
        except KeyError:
            return True  # If no hashes dict then attachments do not exist
        if id not in hashes or hashes[id].digest() != \
                hashlib.md5(binary_element.value).digest():
            return True
        else:
            return False
def _add_element(dcm, tagstack, value):
    """ Add element with tag, vr and value to dcm at location tagstack

    Every entry of tagstack but the last is either a sequence-item index (a
    decimal string) or a stringified tag addressing a nested dataset; the
    final entry names the element that receives value, with its VR looked up
    from the DICOM dictionary.
    """
    node = dcm
    for step in tagstack[:-1]:
        try:
            key = int(step)  # a sequence item index
        except ValueError:
            key = dicom.tag.Tag(__str2tag(step))  # a nested dataset tag
        node = node[key]
    leaf_tag = __str2tag(tagstack[-1])
    leaf_vr = dicom.datadict.dictionaryVR(leaf_tag)
    node[leaf_tag] = dicom.dataelem.DataElement(leaf_tag, leaf_vr, value)
def _tagstack2id(tagstack):
""" Convert a list of tags to a unique (within document) attachment id """
return string.join([str(tag) for tag in tagstack], ':')
def _strip_elements(jsn, elements):
""" Remove supplied elements from the dict object
We use this with a list of binary elements so that we don't store
empty tags in couchdb when we are already storing the binary data as
attachments.
"""
for tagstack, element in elements:
if len(tagstack) == 0:
jsn.pop(element.tag)
else:
current_node = jsn
for tag in tagstack:
current_node = current_node[tag]
current_node.pop(element.tag)
def _set_meta_info_dcm(dcm):
    """ Set the file metadata DataSet attributes

    This is done by PyDicom when we dicom.read_file(foo) but we need to do it
    ourselves when creating a DataSet from scratch, otherwise we cannot use
    foo.pixel_array or dicom.write_file(foo).
    This code is lifted from PyDicom.
    """
    syntax = dcm.file_meta.TransferSyntaxUID
    # The default (explicit VR, little endian) covers Explicit VR LE,
    # Deflated Explicit VR LE, and every other syntax: all Encapsulated
    # (JPEG etc) are ExplVR-LE by Standard PS 3.5-2008 A.4 (p63).
    implicit, little = False, True
    if syntax == dicom.UID.ImplicitVRLittleEndian:
        implicit, little = True, True
    elif syntax == dicom.UID.ExplicitVRBigEndian:
        implicit, little = False, False
    dcm.is_implicit_vr = implicit
    dcm.is_little_endian = little
def pydicom2json(dcm):
    """ Convert the supplied PyDicom object into a json-serializable dict

    Binary elements cannot be represented in json so we return these as a
    separate list of the tuple (tagstack, element), where:
     - element  = dicom.dataelem.DataElement
     - tagstack = list of tags/sequence IDs that address the element

    The tagstack variable means we know the absolute address of each binary
    element. We then use this as the attachment id in couchdb - when we
    retrieve the attachment we can then insert it at the appropriate point in
    the tree.

    Returns (jsn, binary_elements, file_meta_binary_elements).
    Note: mutates dcm — private tags are removed and strings are decoded.
    """
    dcm.remove_private_tags()  # No support for now
    dcm.decode()  # Convert to unicode
    binary_elements = []
    tagstack = []  # shared stack; __jsonify pushes/pops on it while recursing
    jsn = dict((key, __jsonify(dcm[key], binary_elements, tagstack))
               for key in dcm.keys())
    file_meta_binary_elements = []
    # file_meta is stored under a reserved 'file_meta' key in the document
    jsn['file_meta'] = dict((key, __jsonify(dcm.file_meta[key],
                                            file_meta_binary_elements,
                                            tagstack))
                            for key in dcm.file_meta.keys())
    return jsn, binary_elements, file_meta_binary_elements
def __jsonify(element, binary_elements, tagstack):
    """ Convert key, value to json-serializable types

    Recursive, so if value is key/value pairs then all children will get
    converted.

    Binary values are not converted: the element is appended (with a copy of
    the current tagstack) to binary_elements and an empty string is stored in
    the json tree as a placeholder.
    """
    value = element.value
    if element.VR in BINARY_VR_VALUES:
        # Remember where this binary element lives; it becomes an attachment
        binary_elements.append((tagstack[:], element))
        return ''
    elif type(value) == list:
        # Multi-valued element: map each item individually
        new_list = [__typemap(listvalue) for listvalue in value]
        return new_list
    elif type(value) == dicom.sequence.Sequence:
        # Sequence of datasets: push the sequence tag, then each item index,
        # so nested binary elements record their absolute address
        tagstack.append(element.tag)
        nested_data = []
        for i in range(0, len(value)):
            tagstack.append(i)
            nested_data.append(dict(\
                (subkey, __jsonify(value[i][subkey], binary_elements, tagstack))
                for subkey in value[i].keys()))
            tagstack.pop()
        tagstack.pop()
        return nested_data
    else:
        return __typemap(value)
def __typemap(value):
    """ Map PyDicom types that won't serialise to JSON types """
    # NOTE(review): exact type() check for UID but isinstance for BaseTag —
    # presumably deliberate (plain strings must fall through unchanged);
    # confirm before changing either to the other form.
    if type(value) == dicom.UID.UID:
        return uid2str(value)
    elif isinstance(value, dicom.tag.BaseTag):
        return long(value)  # Python 2 long; a tag is a single packed integer
    else:
        return value
def json2pydicom(jsn):
    """ Convert the supplied json dict into a PyDicom object """
    dataset = dicom.dataset.Dataset()
    # Skip couch bookkeeping keys and the reserved file_meta entry
    couch_keys = ('_rev', '_id', '_attachments', 'file_meta')
    for key in jsn:
        if key not in couch_keys:
            dataset.add(__dicomify(key, jsn[key]))
    file_meta = dicom.dataset.Dataset()
    for key in jsn['file_meta']:
        file_meta.add(__dicomify(key, jsn['file_meta'][key]))
    dataset.file_meta = file_meta
    return dataset
def __dicomify(key, value):
    """ Convert a json key, value to a PyDicom DataElement

    key is the "(gggg, eeee)" string form of a tag; the VR is recovered from
    the DICOM dictionary, with a few special cases handled below.
    """
    tag = __str2tag(key)
    if tag.element == 0:  # 0 tag implies group length (filereader.py pydicom)
        vr = 'UL'
    else:
        vr = dicom.datadict.dictionaryVR(tag)
    if vr == 'OW/OB':  # Always write pixel data as bytes
        vr = 'OB'      # rather than words
    if vr == 'US or SS':   # US or SS is up to us as the data is already
        if value < 0:      # decoded. We therefore choose US, unless we
            vr = 'SS'      # need a signed value.
        else:
            vr = 'US'
    if vr == 'SQ':  # We have a sequence of datasets, so we recurse
        return dicom.dataelem.DataElement(tag, vr,
                                          dicom.sequence.Sequence([
                                              __make_dataset(
                                                  [__dicomify(subkey, listvalue[subkey])
                                                   for subkey in listvalue.keys()
                                                   ])
                                              for listvalue in value
                                          ]))
    else:
        return dicom.dataelem.DataElement(tag, vr, value)
def __make_dataset(data_elements):
    """ Create a Dataset from a list of DataElement objects """
    ds = dicom.dataset.Dataset()
    for elem in data_elements:
        ds.add(elem)
    return ds
def __str2tag(key):
    """ Convert string representation of a tag into a Tag

    Expects the "(gggg, eeee)" form produced by str(Tag): characters 1-4 are
    the hex group, characters 7 up to the closing parenthesis the hex element.
    """
    group = int(key[1:5], 16)
    element = int(key[7:-1], 16)
    return dicom.tag.Tag((group, element))
if __name__ == '__main__':
    # Smoke test: push every test DICOM file into a throwaway couch database.
    TESTDB = 'dicom_test'
    SERVER = 'http://127.0.0.1:5984'
    # Delete test database if it already exists
    couch = couchdb.Server(SERVER)
    try:
        couch.delete(TESTDB)
    except couchdb.client.ResourceNotFound:
        pass  # Don't worry if it didn't exist
    db = DicomCouch(SERVER, TESTDB)
    testfiles_dir = '../testfiles'
    testfiles = [os.path.join('../testfiles', name)
                 for name in os.listdir('../testfiles')
                 if name.endswith('dcm')]
    for dcmfile in testfiles:
        dcm = dicom.read_file(dcmfile)
        db[dcm.SeriesInstanceUID] = dcm
| Python |
# pydicom_PIL.py
"""View DICOM images using Python image Library (PIL)
Usage:
>>> import dicom
>>> from dicom.contrib.pydicom_PIL import show_PIL
>>> ds = dicom.read_file("filename")
>>> show_PIL(ds)
Requires Numpy: http://numpy.scipy.org/
and Python Imaging Library: http://www.pythonware.com/products/pil/
"""
# Copyright (c) 2009 Darcy Mason, Adit Panchal
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Based on image.py from pydicom version 0.9.3,
# LUT code added by Adit Panchal
# Tested on Python 2.5.4 (32-bit) on Mac OS X 10.6
# using numpy 1.3.0 and PIL 1.1.7b1
# Optional dependency probe: PIL is only needed by show_PIL().
have_PIL = True
try:
    import PIL.Image
except ImportError:  # narrowed from a bare except: only import failure is expected
    have_PIL = False
# Optional dependency probe: numpy is needed for the LUT computation.
# (The original file contained this probe AND get_LUT_value twice,
# byte-for-byte identical; the duplicates have been removed.)
have_numpy = True
try:
    import numpy as np
except ImportError:  # narrowed from a bare except: only import failure is expected
    have_numpy = False


def get_LUT_value(data, window, level):
    """Apply the RGB Look-Up Table for the given data and window/level value.

    data is a numpy array of raw pixel values; window and level define the
    linear VOI transform: values at or below (level - 0.5 - (window-1)/2)
    map to 0, values above (level - 0.5 + (window-1)/2) map to 255, and
    everything in between is interpolated linearly.

    Raises ImportError when numpy is not installed.
    """
    if not have_numpy:
        raise ImportError("Numpy is not available. See http://numpy.scipy.org/ to download and install")
    return np.piecewise(data,
                        [data <= (level - 0.5 - (window-1)/2),
                         data > (level - 0.5 + (window-1)/2)],
                        [0, 255, lambda data: ((data - (level - 0.5))/(window-1) + 0.5)*(255-0)])
# Display an image using the Python Imaging Library (PIL)
def show_PIL(dataset):
    """Display a single-frame DICOM dataset on screen with PIL's viewer.

    If the dataset has no WindowWidth/WindowCenter, the raw PixelData bytes
    are handed to PIL directly; otherwise the window/level LUT is applied to
    the numpy pixel_array first.

    Raises ImportError when PIL is missing, TypeError when the dataset has no
    pixel data or an unsupported BitsAllocated/SamplesPerPixel combination.
    """
    if not have_PIL:
        raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
    if ('PixelData' not in dataset):
        raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
    if ('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):  # can only apply LUT if these values exist
        bits = dataset.BitsAllocated
        samples = dataset.SamplesPerPixel
        if bits == 8 and samples == 1:
            mode = "L"
        elif bits == 8 and samples == 3:
            mode = "RGB"
        elif bits == 16:
            # not sure about this -- PIL source says is 'experimental' and no
            # documentation. Also, should bytes swap depending on endian of
            # file and system??
            mode = "I;16"
        else:
            raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
        # PIL size = (width, height)
        size = (dataset.Columns, dataset.Rows)
        # Recommended to specify all details by
        # http://www.pythonware.com/library/pil/handbook/image.htm
        im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1)
    else:
        image = get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
        # Convert mode to L since LUT has only 256 values:
        # http://www.pythonware.com/library/pil/handbook/image.htm
        im = PIL.Image.fromarray(image).convert('L')
    im.show()
| Python |
# __init__.py
# Mark the folder as a python package
| Python |
#==========================================================================
# imViewer-Simple.py
#
# An example program that opens uncompressed DICOM images and
# converts them via numPy and PIL to be viewed in wxWidgets GUI
# apps. The conversion is currently:
#
# pydicom->NumPy->PIL->wxPython.Image->wxPython.Bitmap
#
# Gruesome but it mostly works. Surely there is at least one
# of these steps that could be eliminated (probably PIL) but
# haven't tried that yet and I may want some of the PIL manipulation
# functions.
#
# This won't handle RLE, embedded JPEG-Lossy, JPEG-lossless,
#    JPEG2000, old ACR/NEMA files, or anything weird.  Also doesn't
# handle some RGB images that I tried.
#
# Have added Adit Panchal's LUT code. It helps a lot, but needs
# to be further generalized. Added test for window and/or level
# as 'list' type - crude, but it worked for a bunch of old MR and
# CT slices I have.
#
# Testing: minimal
# Tried only on WinXP sp2 using numpy 1.3.0
# and PIL 1.1.7b1, Python 2.6.4, and wxPython 2.8.10.1
#
# Dave Witten: Nov. 11, 2009
#==========================================================================
import os
import os.path
import sys
import dicom
import wx
# Optional dependency probes; the viewer degrades gracefully without them.
have_PIL = True
try:
    import PIL.Image
except ImportError:  # narrowed from a bare except: only import failure is expected
    have_PIL = False
have_numpy = True
try:
    import numpy as np
except ImportError:  # narrowed from a bare except: only import failure is expected
    have_numpy = False
#----------------------------------------------------------------
# Initialize image capabilities.
#----------------------------------------------------------------
wx.InitAllImageHandlers()
#----------------------------------------------------------------
# MsgDlg()
#----------------------------------------------------------------
def MsgDlg(window, string, caption='OFAImage', style=wx.YES_NO|wx.CANCEL):
    """Common MessageDialog: show a modal message box and return its result."""
    dialog = wx.MessageDialog(window, string, caption, style)
    answer = dialog.ShowModal()
    dialog.Destroy()
    return answer
#=======================================================
# class ImFrame
#=======================================================
class ImFrame(wx.Frame):
    """Class for main window.

    Left pane: a tree view of the dataset's data elements.
    Right pane: the image rendered via pydicom -> numpy -> PIL -> wx.Bitmap.
    """
    #------------------------------------------------------------
    # ImFrame.__init__()
    #------------------------------------------------------------
    def __init__(self, parent, title):
        """Create the pydicom image example's main frame window."""
        # NOTE(review): the 'title' argument is accepted but unused; the
        # frame title is hard-coded to "" below.
        wx.Frame.__init__(self,
                          parent,
                          id=-1,
                          title="",
                          pos=wx.DefaultPosition,
                          size=wx.Size(w=1024, h=768),
                          style=wx.DEFAULT_FRAME_STYLE | wx.SUNKEN_BORDER | wx.CLIP_CHILDREN)
        #--------------------------------------------------------
        # Set up the menubar.
        #--------------------------------------------------------
        self.mainmenu = wx.MenuBar()
        # Make the 'File' menu.
        menu = wx.Menu()
        item = menu.Append(wx.ID_ANY, '&Open', 'Open file for editing')
        self.Bind(wx.EVT_MENU, self.OnFileOpen, item)
        item = menu.Append(wx.ID_ANY, 'E&xit', 'Exit Program')
        self.Bind(wx.EVT_MENU, self.OnFileExit, item)
        self.mainmenu.Append(menu, '&File')
        # Attach the menu bar to the window.
        self.SetMenuBar(self.mainmenu)
        #--------------------------------------------------------
        # Set up the main splitter window.
        #--------------------------------------------------------
        self.mainSplitter = wx.SplitterWindow(self, style=wx.NO_3D | wx.SP_3D)
        self.mainSplitter.SetMinimumPaneSize(1)
        #-------------------------------------------------------------
        # Create the folderTreeView on the left.
        #-------------------------------------------------------------
        self.dsTreeView = wx.TreeCtrl(self.mainSplitter, style=wx.TR_LINES_AT_ROOT | wx.TR_HAS_BUTTONS)
        #--------------------------------------------------------
        # Create the ImageView on the right pane.
        #--------------------------------------------------------
        self.imView = wx.Panel(self.mainSplitter, style=wx.VSCROLL | wx.HSCROLL | wx.CLIP_CHILDREN)
        self.imView.Bind(wx.EVT_PAINT, self.OnPaint)
        self.imView.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.imView.Bind(wx.EVT_SIZE, self.OnSize)
        #--------------------------------------------------------
        # Install the splitter panes.
        #--------------------------------------------------------
        self.mainSplitter.SplitVertically(self.dsTreeView, self.imView)
        self.mainSplitter.SetSashPosition(300, True)
        #--------------------------------------------------------
        # Initialize some values
        #--------------------------------------------------------
        self.dcmdsRoot = False
        self.foldersRoot = False
        self.loadCentered = True
        self.bitmap = None
        self.Show(True)

    #------------------------------------------------------------
    # ImFrame.OnFileExit()
    #------------------------------------------------------------
    def OnFileExit(self, event):
        """Exits the program."""
        self.Destroy()
        event.Skip()

    #------------------------------------------------------------
    # ImFrame.OnSize()
    #------------------------------------------------------------
    def OnSize(self, event):
        "Window 'size' event."
        self.Refresh()

    #------------------------------------------------------------
    # ImFrame.OnEraseBackground()
    #------------------------------------------------------------
    def OnEraseBackground(self, event):
        "Window 'erase background' event."
        # Intentionally empty: suppressing the default erase avoids flicker;
        # OnPaint repaints the whole panel anyway.
        pass

    #------------------------------------------------------------
    # ImFrame.populateTree()
    #------------------------------------------------------------
    def populateTree(self, ds):
        """ Populate the tree in the left window with the [desired]
        dataset values"""
        if not self.dcmdsRoot:
            self.dcmdsRoot = self.dsTreeView.AddRoot(text="DICOM Objects")
        else:
            self.dsTreeView.DeleteChildren(self.dcmdsRoot)
        self.recurse_tree(ds, self.dcmdsRoot)
        self.dsTreeView.ExpandAll()

    #------------------------------------------------------------
    # ImFrame.recurse_tree()
    #------------------------------------------------------------
    def recurse_tree(self, ds, parent, hide=False):
        """ order the dicom tags """
        for data_element in ds:
            if isinstance(data_element.value, unicode):
                ip = self.dsTreeView.AppendItem(parent, text=unicode(data_element))
            else:
                ip = self.dsTreeView.AppendItem(parent, text=str(data_element))
            if data_element.VR == "SQ":
                # One child node per dataset in the sequence, recursing into it
                for i, ds in enumerate(data_element.value):
                    sq_item_description = data_element.name.replace(" Sequence", "")
                    item_text = "%s %d" % (sq_item_description, i+1)
                    parentNodeID = self.dsTreeView.AppendItem(ip, text=item_text.rjust(128))
                    self.recurse_tree(ds, parentNodeID)

    ## --- Most of what is important happens below this line ---------------------

    #------------------------------------------------------------
    # ImFrame.OnFileOpen()
    #------------------------------------------------------------
    def OnFileOpen(self, event):
        """Opens a selected file."""
        dlg = wx.FileDialog(self, 'Choose a file to add.', '', '', '*.*', wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            fullPath = dlg.GetPath()
            imageFile = dlg.GetFilename()
            #checkDICMHeader()
            self.show_file(imageFile, fullPath)

    #------------------------------------------------------------
    # ImFrame.OnPaint()
    #------------------------------------------------------------
    def OnPaint(self, event):
        "Window 'paint' event."
        dc = wx.PaintDC(self.imView)
        dc = wx.BufferedDC(dc)
        # paint a background just so it isn't *so* boring.
        dc.SetBackground(wx.Brush("WHITE"))
        dc.Clear()
        dc.SetBrush(wx.Brush("GREY", wx.CROSSDIAG_HATCH))
        windowsize = self.imView.GetSizeTuple()
        dc.DrawRectangle(0, 0, windowsize[0], windowsize[1])
        bmpX0 = 0
        bmpY0 = 0
        if self.bitmap is not None:
            if self.loadCentered:
                bmpX0 = (windowsize[0] - self.bitmap.Width) / 2
                bmpY0 = (windowsize[1] - self.bitmap.Height) / 2
            dc.DrawBitmap(self.bitmap, bmpX0, bmpY0, False)

    #------------------------------------------------------------
    # ImFrame.ConvertWXToPIL()
    # Expropriated from Andrea Gavana's
    # ShapedButton.py in the wxPython dist
    #------------------------------------------------------------
    def ConvertWXToPIL(self, bmp):
        """ Convert wx.Image Into PIL Image. """
        width = bmp.GetWidth()
        height = bmp.GetHeight()
        im = wx.EmptyImage(width, height)
        im.fromarray("RGBA", (width, height), bmp.GetData())
        # Fixed: the original returned the undefined name 'img', so every
        # call raised NameError.
        # NOTE(review): despite the docstring this builds a wx.EmptyImage,
        # not a PIL image — confirm against upstream ShapedButton.py.
        return im

    #------------------------------------------------------------
    # ImFrame.ConvertPILToWX()
    # Expropriated from Andrea Gavana's
    # ShapedButton.py in the wxPython dist
    #------------------------------------------------------------
    def ConvertPILToWX(self, pil, alpha=True):
        """ Convert PIL Image Into wx.Image. """
        if alpha:
            # apply() was removed in Python 3; argument unpacking is the
            # behavior-identical replacement.
            image = wx.EmptyImage(*pil.size)
            image.SetData(pil.convert("RGB").tostring())
            image.SetAlphaData(pil.convert("RGBA").tostring()[3::4])
        else:
            image = wx.EmptyImage(pil.size[0], pil.size[1])
            new_image = pil.convert('RGB')
            data = new_image.tostring()
            image.SetData(data)
        return image

    #-----------------------------------------------------------
    # ImFrame.get_LUT_value()
    #-----------------------------------------------------------
    def get_LUT_value(self, data, window, level):
        """Apply the RGB Look-Up Table for the given data and window/level value."""
        if not have_numpy:
            raise ImportError("Numpy is not available. See http://numpy.scipy.org/ to download and install")
        # Some files store window/level as multi-valued lists; use the first.
        if isinstance(window, list):
            window = window[0]
        if isinstance(level, list):
            level = level[0]
        return np.piecewise(
            data,
            [data <= (level - 0.5 - (window-1)/2), data > (level - 0.5 + (window-1)/2)],
            [0, 255, lambda data: ((data - (level - 0.5))/(window-1) + 0.5)*(255-0)]
        )

    #-----------------------------------------------------------
    # ImFrame.loadPIL_LUT(dataset)
    # Display an image using the Python Imaging Library (PIL)
    #-----------------------------------------------------------
    def loadPIL_LUT(self, dataset):
        """Return a PIL image for the dataset, window/levelled when possible.

        Raises ImportError when PIL is missing, TypeError when the dataset
        has no pixel data or an unsupported bits/samples combination.
        """
        if not have_PIL:
            raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
        if('PixelData' not in dataset):
            raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
        if('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):  # can only apply LUT if these values exist
            bits = dataset.BitsAllocated
            samples = dataset.SamplesPerPixel
            if bits == 8 and samples == 1:
                mode = "L"
            elif bits == 8 and samples == 3:
                mode = "RGB"
            elif bits == 16:  # not sure about this -- PIL source says is 'experimental' and no documentation.
                mode = "I;16"  # Also, should bytes swap depending on endian of file and system??
            else:
                raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
            size = (dataset.Columns, dataset.Rows)
            im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1)  # Recommended to specify all details by http://www.pythonware.com/library/pil/handbook/image.htm
        else:
            image = self.get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
            im = PIL.Image.fromarray(image).convert('L')  # Convert mode to L since LUT has only 256 values: http://www.pythonware.com/library/pil/handbook/image.htm
        return im

    #------------------------------------------------------------
    # ImFrame.show_file()
    #------------------------------------------------------------
    def show_file(self, imageFile, fullPath):
        """ Load the DICOM file, make sure it contains at least one
        image, and set it up for display by OnPaint().  ** be
        careful not to pass a unicode string to read_file or it will
        give you 'fp object does not have a defer_size attribute,
        or some such."""
        ds = dicom.read_file(str(fullPath))
        ds.decode()  # change strings to unicode
        self.populateTree(ds)
        if 'PixelData' in ds:
            self.dImage = self.loadPIL_LUT(ds)
            if self.dImage is not None:
                tmpImage = self.ConvertPILToWX(self.dImage, False)
                self.bitmap = wx.BitmapFromImage(tmpImage)
                self.Refresh()
##------ This is just the initialization of the App -------------------------
#=======================================================
# The main App Class.
#=======================================================
class App(wx.App):
    """Image Application."""

    #------------------------------------------------------------
    # App.OnInit()
    #------------------------------------------------------------
    def OnInit(self):
        """Create the Image Application's main frame window."""
        ImFrame(None, 'wxImage Example')
        return True
#---------------------------------------------------------------------
# If this file is running as main or a standalone test, begin execution here.
#---------------------------------------------------------------------
if __name__ == '__main__':
    # Launch the viewer and enter the wx event loop.
    application = App(0)
    application.MainLoop()
| Python |
# sequence.py
"""Hold the Sequence class, which stores a dicom sequence (list of Datasets)"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from dicom.dataset import Dataset
from dicom.multival import MultiValue
def validate_dataset(elem):
    """Ensures that the value is a Dataset instance"""
    if isinstance(elem, Dataset):
        return elem
    raise TypeError('Sequence contents must be a Dataset instance')
class Sequence(MultiValue):
    """Class to hold multiple Datasets in a list

    This class is derived from MultiValue and as such enforces that all items
    added to the list are Dataset instances. In order to do this, a validator
    is substituted for type_constructor when constructing the MultiValue super
    class
    """
    def __init__(self, iterable=None):
        """Initialize a list of Datasets

        :param iterable: an iterable (e.g. list, tuple) of Datasets. If no
                         value is provided, an empty Sequence is generated
        """
        # We add this extra check to throw a relevant error. Without it, the
        # error will be simply that a Sequence must contain Datasets (since a
        # Dataset IS iterable). This error, however, doesn't inform the user
        # that the actual issue is that their Dataset needs to be INSIDE an
        # iterable object
        if isinstance(iterable, Dataset):
            raise TypeError('The Sequence constructor requires an iterable')
        # If no inputs are provided, we create an empty Sequence
        if not iterable:
            iterable = list()
        # validate_dataset is used as a pseudo type_constructor
        super(Sequence, self).__init__(validate_dataset, iterable)

    def __str__(self):
        """String representation: the items rendered back-to-back in brackets."""
        lines = [str(x) for x in self]
        return "[" + "".join(lines) + "]"

    def __repr__(self):
        """Sequence-specific string representation"""
        formatstr = "<%(classname)s, length %(count)d, at %(id)X>"
        return formatstr % {'classname': self.__class__.__name__,
                            'id': id(self), 'count': len(self)}
| Python |
# __init__.py for Dicom package
"""pydicom package -- easily handle DICOM files. See Quick Start below.
Copyright (c) 2008-2012 Darcy Mason
This file is part of pydicom, released under a modified MIT license.
See the file license.txt included with this distribution, also
available at http://pydicom.googlecode.com
-----------
Quick Start
-----------
1. A simple program to read a dicom file, modify a value, and write to a new file::
import dicom
dataset = dicom.read_file("file1.dcm")
dataset.PatientName = 'anonymous'
dataset.save_as("file2.dcm")
2. See the files in the examples directory that came with this package for more
examples, including some interactive sessions.
3. Learn the methods of the Dataset class; that is the one you will
work with most directly.
4. Questions/comments etc can be directed to the pydicom google group at
http://groups.google.com/group/pydicom
"""
import sys
# Fail fast on unsupported interpreters.
if sys.version_info < (2, 6, 0):
    raise ImportError("pydicom > 0.9.7 requires python 2.6 or later")
# Flag used throughout the package to branch on Python 3 behaviour.
in_py3 = sys.version_info[0] > 2
# Set up logging system for the whole package.
# In each module, set logger=logging.getLogger('pydicom') and the same instance
# will be used by all
# At command line, turn on debugging for all pydicom functions with:
# import dicom
# dicom.debug()
# Turn off debugging with
# dicom.debug(False)
import logging
def debug(debug_on=True):
    """Turn debugging of DICOM file reading and writing on or off.

    When debugging is on, file location and details about the elements read at
    that location are logged to the 'pydicom' logger using python's logging module.

    :param debug_on: True (default) to turn on debugging, False to turn off.
    """
    global logger, debugging
    debugging = debug_on
    logger.setLevel(logging.DEBUG if debug_on else logging.WARNING)
# Package-wide logger: every module calls logging.getLogger('pydicom') and
# shares this handler/formatter configuration.
logger = logging.getLogger('pydicom')
handler = logging.StreamHandler()
# formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s", "%Y-%m-%d %H:%M")  # '%(asctime)s %(levelname)s %(message)s'
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
debug(False)  # force level=WARNING, in case logging default is set differently (issue 102)
# For convenience, import the read_file and write_file functions (most used)
# into the "dicom" namespace.
from dicom.filereader import read_file
from dicom.filewriter import write_file
__version__ = "1.0a"
__version_info__ = (1, 0, 0)
| Python |
# filewriter.py
"""Write a dicom media file."""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import pack
import logging
logger = logging.getLogger('pydicom')
from dicom import in_py3
from dicom.charset import default_encoding
from dicom.UID import ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian
from dicom.filebase import DicomFile
from dicom.datadict import dictionaryVR
from dicom.dataset import Dataset
from dicom.dataelem import DataElement
from dicom.tag import Tag, ItemTag, ItemDelimiterTag, SequenceDelimiterTag
from dicom.sequence import Sequence
from dicom.valuerep import extra_length_VRs
def write_numbers(fp, data_element, struct_format):
    """Write a "value" of type struct_format from the dicom file.

    "Value" can be more than one number.

    fp -- DicomIO-like object; its is_little_endian flag selects byte order.
    data_element -- element whose numeric value(s) will be written.
    struct_format -- the character format as used by the struct module.

    Raises IOError if the value cannot be packed with struct_format.
    """
    endianChar = '><'[fp.is_little_endian]
    value = data_element.value
    if value == "":
        return  # don't need to write anything for empty string
    format_string = endianChar + struct_format
    try:
        try:
            value.append  # works only if list, not if string or number
        except AttributeError:  # is a single value - the usual case
            fp.write(pack(format_string, value))
        else:
            for val in value:
                fp.write(pack(format_string, val))
    except Exception as e:
        # Bug fix: original referenced misspelled name "data_elemesnt" here,
        # turning any pack failure into a NameError instead of this IOError.
        raise IOError("{0}\nfor data_element:\n{1}".format(str(e), str(data_element)))
def write_OBvalue(fp, data_element):
    """Write a data_element with VR of 'other byte' (OB).

    The stored bytes are emitted verbatim; no padding or swapping is done.
    """
    raw_bytes = data_element.value
    fp.write(raw_bytes)
def write_OWvalue(fp, data_element):
    """Write a data_element with VR of 'other word' (OW).

    Note: This **does not currently do the byte swapping** for Endian state;
    the raw bytes are written exactly as stored.
    """
    # XXX raw write only -- endian swapping is not implemented yet
    raw_words = data_element.value
    fp.write(raw_words)
def write_UI(fp, data_element):
    """Write a data_element with VR of 'unique identifier' (UI).

    UI values are padded to even length with a trailing NUL byte
    (rather than the space used for ordinary strings).
    """
    write_string(fp, data_element, '\0')
def multi_string(val):
    """Put a string together with delimiter if has more than one value.

    Lists/tuples are joined with a single backslash (the DICOM multi-value
    delimiter); any other value is returned unchanged.
    """
    if not isinstance(val, (list, tuple)):
        return val
    return "\\".join(val)  # \ is escape chr, so "\\" gives single backslash
def write_string(fp, data_element, padding=' '):
    """Write a single or multivalued string.

    The value is padded with `padding` when its length is odd, since DICOM
    requires even-length values.
    """
    val = multi_string(data_element.value)
    if len(val) % 2:
        val += padding  # pad to even length
    if in_py3:
        val = bytes(val, default_encoding)
    fp.write(val)
def write_number_string(fp, data_element, padding=' '):
    """Handle IS or DS VR - write a number stored as a string of digits.

    If a DS or IS value carries an original_string attribute, that exact
    text is written so unchanged elements round-trip byte-identically.
    """
    def _as_text(x):
        # prefer the exact text read from the file, if still available
        return x.original_string if hasattr(x, 'original_string') else str(x)
    val = data_element.value
    if isinstance(val, (list, tuple)):
        val = "\\".join(_as_text(x) for x in val)
    else:
        val = _as_text(val)
    if len(val) % 2 != 0:
        val += padding  # pad to even length
    if in_py3:
        val = bytes(val, default_encoding)
    fp.write(val)
def write_data_element(fp, data_element):
    """Write the data_element to file fp according to dicom media storage rules.

    fp -- DicomIO object; its is_implicit_VR / is_little_endian flags select
          the encoding.
    data_element -- the DataElement to encode (tag, VR, value).

    Raises ValueError for an ambiguous VR under explicit-VR encoding, and
    NotImplementedError for a VR with no registered writer function.
    """
    fp.write_tag(data_element.tag)
    VR = data_element.VR
    if not fp.is_implicit_VR:
        # Explicit VR: the two-character VR code is written after the tag
        if len(VR) != 2:
            msg = "Cannot write ambiguous VR of '%s' for data element with tag %r." % (VR, data_element.tag)
            msg += "\nSet the correct VR before writing, or use an implicit VR transfer syntax"
            raise ValueError(msg)
        if in_py3:
            fp.write(bytes(VR, default_encoding))
        else:
            fp.write(VR)
        if VR in extra_length_VRs:
            fp.write_US(0)  # reserved 2 bytes
    if VR not in writers:
        raise NotImplementedError("write_data_element: unknown Value Representation '{0}'".format(VR))
    length_location = fp.tell()  # save location for later.
    # Special VRs (and implicit VR) use a 4-byte length field; other explicit
    # VRs use only 2 bytes. A placeholder is written now, patched below.
    if not fp.is_implicit_VR and VR not in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
        fp.write_US(0)  # Explicit VR length field is only 2 bytes
    else:
        fp.write_UL(0xFFFFFFFFL)  # will fill in real length value later if not undefined length item
    # writers[VR] is either a plain writer function or a
    # (write_numbers, struct_format) pair -- dispatch accordingly.
    try:
        writers[VR][0]  # if writer is a tuple, then need to pass a number format
    except TypeError:
        writers[VR](fp, data_element)  # call the function to write that kind of item
    else:
        writers[VR][0](fp, data_element, writers[VR][1])
    is_undefined_length = False
    if hasattr(data_element, "is_undefined_length") and data_element.is_undefined_length:
        is_undefined_length = True
    # Rewind and patch the length placeholder now that the value size is known
    location = fp.tell()
    fp.seek(length_location)
    if not fp.is_implicit_VR and VR not in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
        fp.write_US(location - length_location - 2)  # 2 is length of US
    else:
        # write the proper length of the data_element back in the length slot, unless is SQ with undefined length.
        if not is_undefined_length:
            fp.write_UL(location - length_location - 4)  # 4 is length of UL
    fp.seek(location)  # ready for next data_element
    if is_undefined_length:
        # undefined-length values are closed by an explicit delimiter item
        fp.write_tag(SequenceDelimiterTag)
        fp.write_UL(0)  # 4-byte 'length' of delimiter data item
def write_dataset(fp, dataset):
    """Write a Dataset dictionary to the file. Return the total length written."""
    start = fp.tell()
    # DICOM requires data elements to appear in ascending tag order
    for tag in sorted(dataset.keys()):
        write_data_element(fp, dataset[tag])
    return fp.tell() - start
def write_sequence(fp, data_element):
    """Write a dicom Sequence contained in data_element to the file fp.

    write_data_element has already written the VR='SQ' (if needed) and a
    placeholder for the length; only the sequence items are written here.
    """
    for item_dataset in data_element.value:
        write_sequence_item(fp, item_dataset)
def write_sequence_item(fp, dataset):
    """Write an item (dataset) in a dicom Sequence to the dicom file fp.

    See Dicom standard Part 5, p. 39 ('03 version).
    This is similar to writing a data_element, but with a specific tag for
    Sequence Item. The item length is written explicitly unless the original
    item used undefined-length (delimiter) encoding.
    """
    fp.write_tag(ItemTag)  # marker for start of Sequence Item
    length_location = fp.tell()  # save location for later.
    fp.write_UL(0xffffffffL)  # will fill in real value later if not undefined length
    write_dataset(fp, dataset)
    if getattr(dataset, "is_undefined_length_sequence_item", False):
        # preserve the original delimiter-style encoding
        fp.write_tag(ItemDelimiterTag)
        fp.write_UL(0)  # 4-bytes 'length' field for delimiter item
    else:  # we will be nice and set the lengths for the reader of this file
        location = fp.tell()
        fp.seek(length_location)
        fp.write_UL(location - length_location - 4)  # 4 is length of UL
        fp.seek(location)  # ready for next data_element
def write_UN(fp, data_element):
    """Write a byte string for an DataElement of value 'UN' (unknown).

    Bytes are written exactly as stored, with no interpretation.
    """
    raw = data_element.value
    fp.write(raw)
def write_ATvalue(fp, data_element):
    """Write a data_element tag to a file."""
    value = data_element.value
    try:
        # Multi-valued AT? Note this would fail if Tag were ever derived
        # from a true tuple rather than being a long.
        iter(value)
    except TypeError:
        # single value: make sure it is expressed as a Tag instance
        fp.write_tag(Tag(value))
    else:
        for single_tag in [Tag(t) for t in value]:
            fp.write_tag(single_tag)
def _write_file_meta_info(fp, meta_dataset):
    """Write the dicom group 2 dicom storage File Meta Information to the file.

    The file should already be positioned past the 128 byte preamble.
    The file meta group is always written Explicit VR Little Endian,
    regardless of the dataset's transfer syntax.

    Raises ValueError if the required data_elements (elements 2,3,0x10,0x12)
    are not in the dataset. If the dataset came from a file read with
    read_file(), then the required data_elements should already be there.
    """
    fp.write(b'DICM')
    # File meta info is always LittleEndian, Explicit VR. After will change these
    # to the transfer syntax values set in the meta info
    fp.is_little_endian = True
    fp.is_implicit_VR = False
    if Tag((2, 1)) not in meta_dataset:
        meta_dataset.add_new((2, 1), b'OB', b"\0\1")  # file meta information version
    # Now check that required meta info tags are present:
    missing = []
    for element in [2, 3, 0x10, 0x12]:
        if Tag((2, element)) not in meta_dataset:
            missing.append(Tag((2, element)))
    if missing:
        raise ValueError("Missing required tags {0} for file meta information".format(str(missing)))
    # Put in temp number for required group length, save current location to come back
    meta_dataset[(2, 0)] = DataElement((2, 0), 'UL', 0)  # put 0 to start
    group_length_data_element_size = 12  # !based on DICOM std ExplVR
    group_length_tell = fp.tell()
    # Write the file meta dataset, including the temporary group length
    length = write_dataset(fp, meta_dataset)
    group_length = length - group_length_data_element_size  # counts from end of that
    # Save end of file meta to go back to
    end_of_file_meta = fp.tell()
    # Go back and write the actual group length over the placeholder
    fp.seek(group_length_tell)
    group_length_data_element = DataElement((2, 0), 'UL', group_length)
    write_data_element(fp, group_length_data_element)
    # Return to end of file meta, ready to write remainder of the file
    fp.seek(end_of_file_meta)
def write_file(filename, dataset, WriteLikeOriginal=True):
    """Store a Dataset to the filename specified.

    Set dataset.preamble if you want something other than 128 0-bytes.
    If the dataset was read from an existing dicom file, then its preamble
    was stored at read time. It is up to you to ensure the preamble is still
    correct for its purposes.
    If there is no Transfer Syntax tag in the dataset,
    Set dataset.is_implicit_VR, and .is_little_endian
    to determine the transfer syntax used to write the file.

    WriteLikeOriginal -- True if want to preserve the following for each sequence
        within this dataset:
        - preamble -- if no preamble in read file, than not used here
        - dataset.hasFileMeta -- if writer did not do file meta information,
            then don't write here either
        - seq.is_undefined_length -- if original had delimiters, write them now too,
            instead of the more sensible length characters
        - <dataset>.is_undefined_length_sequence_item -- for datasets that belong to a
            sequence, write the undefined length delimiters if that is
            what the original had
    Set WriteLikeOriginal = False to produce a "nicer" DICOM file for other readers,
        where all lengths are explicit.

    Raises NotImplementedError for the unsupported Implicit VR Big Endian
    combination.
    """
    # Decide whether to write DICOM preamble. Should always do so unless trying to mimic the original file read in
    preamble = getattr(dataset, "preamble", None)
    if not preamble and not WriteLikeOriginal:
        preamble = b"\0" * 128
    # NOTE(review): assumes dataset always has a file_meta attribute
    # (possibly None) -- true for FileDataset; confirm for bare Dataset.
    file_meta = dataset.file_meta
    if file_meta is None:
        file_meta = Dataset()
    if 'TransferSyntaxUID' not in file_meta:
        # Derive the transfer syntax UID from the dataset's encoding flags
        if dataset.is_little_endian and dataset.is_implicit_VR:
            file_meta.add_new((2, 0x10), 'UI', ImplicitVRLittleEndian)
        elif dataset.is_little_endian and not dataset.is_implicit_VR:
            file_meta.add_new((2, 0x10), 'UI', ExplicitVRLittleEndian)
        elif not dataset.is_little_endian and not dataset.is_implicit_VR:
            file_meta.add_new((2, 0x10), 'UI', ExplicitVRBigEndian)
        else:
            raise NotImplementedError("pydicom has not been verified for Big Endian with Implicit VR")
    fp = DicomFile(filename, 'wb')
    try:
        if preamble:
            fp.write(preamble)  # blank 128 byte preamble
        _write_file_meta_info(fp, file_meta)
        # Set file VR, endian. MUST BE AFTER writing META INFO (which changes to Explict LittleEndian)
        fp.is_implicit_VR = dataset.is_implicit_VR
        fp.is_little_endian = dataset.is_little_endian
        write_dataset(fp, dataset)
    finally:
        # always release the file handle, even if writing failed part-way
        fp.close()
# Map each VR to a function which can write it
# for write_numbers, the Writer maps to a tuple (function, struct_format)
# (struct_format is python's struct module format)
# Ambiguous VR strings (e.g. 'OB or OW', 'US or SS') map to a best-effort
# writer; their true VR depends on other elements not known at write time.
writers = {'UL': (write_numbers, 'L'), 'SL': (write_numbers, 'l'),
           'US': (write_numbers, 'H'), 'SS': (write_numbers, 'h'),
           'FL': (write_numbers, 'f'), 'FD': (write_numbers, 'd'),
           'OF': (write_numbers, 'f'),
           'OB': write_OBvalue, 'UI': write_UI,
           'SH': write_string, 'DA': write_string, 'TM': write_string,
           'CS': write_string, 'PN': write_string, 'LO': write_string,
           'IS': write_number_string, 'DS': write_number_string, 'AE': write_string,
           'AS': write_string,
           'LT': write_string,
           'SQ': write_sequence,
           'UN': write_UN,
           'AT': write_ATvalue,
           'ST': write_string,
           'OW': write_OWvalue,
           'US or SS': write_OWvalue,
           'OW/OB': write_OBvalue,
           'OB/OW': write_OBvalue,
           'OB or OW': write_OBvalue,
           'OW or OB': write_OBvalue,
           'DT': write_string,
           'UT': write_string,
           }  # note OW/OB depends on other items, which we don't know at write time
| Python |
# UID.py
"""Dicom Unique identifiers"""
# Copyright (c) 2008 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os
import uuid
import time, datetime
from math import fabs
from _UID_dict import UID_dictionary
class InvalidUID(Exception):
    '''
    Thrown when a DICOM UID is invalid.

    Example of invalid UID::

    >>> uid = '1.2.123.'
    '''
    def __init__(self, value):
        # keep the offending UID text for inspection by handlers
        self.value = value
    def __str__(self):
        return repr(self.value)
class UID(str):
    """Subclass python string so have human-friendly UIDs

    Use like:
        uid = UID('1.2.840.10008.1.2.4.50')
    then
        uid.name, uid.type, uid.info, and uid.is_retired all return
        values from the UID_dictionary

    String representation (__str__) will be the name,
    __repr__ will be the full 1.2.840....
    """
    def __new__(cls, val):
        """Set up new instance of the class"""
        # Don't repeat if already a UID class -- then may get the name
        # that str(uid) gives rather than the dotted number
        if isinstance(val, UID):
            return val
        else:
            if isinstance(val, basestring):
                # strip stray whitespace (common when read from files)
                return super(UID, cls).__new__(cls, val.strip())
            else:
                raise TypeError("UID must be a string")
    def __init__(self, val):
        """Initialize the UID properties

        Sets name, type, info, is_retired, and is_transfer_syntax.
        If UID is a transfer syntax, also sets is_little_endian,
        is_implicit_VR, and is_deflated boolean values.
        """
        # Note normally use __new__ on subclassing an immutable, but here we
        # just want to do some pre-processing against the UID dictionary.
        # "My" string can never change (it is a python immutable), so is safe
        if self in UID_dictionary:
            self.name, self.type, self.info, retired = UID_dictionary[self]
            self.is_retired = bool(retired)
        else:
            # unknown UID: fall back to the dotted number itself as the name
            self.name = str.__str__(self)
            self.type, self.info, self.is_retired = (None, None, None)
        # If the UID represents a transfer syntax, store info about that syntax
        self.is_transfer_syntax = (self.type == "Transfer Syntax")
        if self.is_transfer_syntax:
            # Assume a transfer syntax, correct it as necessary
            self.is_implicit_VR = True
            self.is_little_endian = True
            self.is_deflated = False
            if val == '1.2.840.10008.1.2':  # implicit VR little endian
                pass
            elif val == '1.2.840.10008.1.2.1':  # ExplicitVRLittleEndian
                self.is_implicit_VR = False
            elif val == '1.2.840.10008.1.2.2':  # ExplicitVRBigEndian
                self.is_implicit_VR = False
                self.is_little_endian = False
            elif val == '1.2.840.10008.1.2.1.99':  # DeflatedExplicitVRLittleEndian:
                self.is_deflated = True
                self.is_implicit_VR = False
            else:
                # Any other syntax should be Explicit VR Little Endian,
                # e.g. all Encapsulated (JPEG etc) are ExplVR-LE by Standard PS 3.5-2008 A.4 (p63)
                self.is_implicit_VR = False
    def __str__(self):
        """Return the human-friendly name for this UID"""
        return self.name
    def __eq__(self, other):
        """Override string equality so either name or UID number match passes"""
        if str.__eq__(self, other) is True:  # 'is True' needed (issue 96)
            return True
        if str.__eq__(self.name, other) is True:  # 'is True' needed (issue 96)
            return True
        return False
    def is_valid(self):
        '''
        Raise an exception is the UID is invalid

        Usage example::

        >>> invalid_uid = dicom.UID.UID('1.2.345.')
        >>> invalid_uid.is_valid(invalid_uid)
        InvalidUID: 'Trailing dot at the end of the UID'
        >>> valid_uid = dicom.UID.UID('1.2.123')
        '''
        # NOTE(review): raises IndexError for an empty UID string -- confirm
        # callers never pass one.
        if self[-1] == '.':
            raise InvalidUID('Trailing dot at the end of the UID')
    # For python 3, any override of __cmp__ or __eq__ immutable requires
    # explicit redirect of hash function to the parent class
    # See http://docs.python.org/dev/3.0/reference/datamodel.html#object.__hash__
    def __hash__(self):
        return super(UID, self).__hash__()
# Pre-built UID singletons for the standard transfer syntaxes.
ExplicitVRLittleEndian = UID('1.2.840.10008.1.2.1')
ImplicitVRLittleEndian = UID('1.2.840.10008.1.2')
DeflatedExplicitVRLittleEndian = UID('1.2.840.10008.1.2.1.99')
ExplicitVRBigEndian = UID('1.2.840.10008.1.2.2')
# Transfer syntaxes whose pixel data is stored without image compression.
NotCompressedPixelTransferSyntaxes = [ExplicitVRLittleEndian,
                                      ImplicitVRLittleEndian,
                                      DeflatedExplicitVRLittleEndian,
                                      ExplicitVRBigEndian]
# Many thanks to the Medical Connections for offering free valid UIDs (http://www.medicalconnections.co.uk/FreeUID.html)
# Their service was used to obtain the following root UID for pydicom:
pydicom_root_UID = '1.2.826.0.1.3680043.8.498.'
pydicom_UIDs = {
    pydicom_root_UID + '1': 'ImplementationClassUID',
}
def generate_uid(prefix=pydicom_root_UID, truncate=False):
    '''
    Generate a dicom unique identifier based on host id, process id and current
    time. The max length of the generated UID is 64 characters.

    If the given prefix is ``None``, the UID is generated following the method
    described on `David Clunie website
    <http://www.dclunie.com/medical-image-faq/html/part2.html#UID>`_

    Usage example::

    >>> dicom.UID.generate_uid()
    1.2.826.0.1.3680043.8.498.2913212949509824014974371514
    >>> dicom.UID.generate_uid(None)
    2.25.31215762025423160614120088028604965760

    This method is inspired from the work of `DCMTK
    <http://dicom.offis.de/dcmtk.php.en>`_.

    :param prefix: The site root UID. Default to pydicom root UID.
    :param truncate: If True, chop the result down to the 64-character limit.
    '''
    max_uid_len = 64
    if prefix is None:
        # UUID-derived UID under the standard '2.25.' arc
        dicom_uid = '2.25.{0}'.format(uuid.uuid1().int)
    else:
        # combine MAC address, pid and sub-second time for (weak) uniqueness
        uid_info = [uuid.getnode(),
                    fabs(os.getpid()),
                    datetime.datetime.today().second,
                    datetime.datetime.today().microsecond]
        suffix = ''.join([str(long(x)) for x in uid_info])
        dicom_uid = ''.join([prefix, suffix])
    if truncate:
        dicom_uid = dicom_uid[:max_uid_len]
    dicom_uid = UID(dicom_uid)
    # This will raise an exception if the UID is invalid
    dicom_uid.is_valid()
    return dicom_uid
| Python |
# filebase.py
"""Hold DicomFile class, which does basic I/O for a dicom file."""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import absolute_import
from dicom.tag import Tag
from struct import unpack, pack
from io import BytesIO
import logging
logger = logging.getLogger('pydicom')
class DicomIO(object):
    """File object which holds transfer syntax info and anything else we need.

    Subclasses/wrappers must supply parent_read, tell, seek, write and close;
    the is_little_endian setter binds the endian-specific read/write methods.
    """
    max_read_attempts = 3  # number of times to read if don't get requested bytes
    defer_size = None  # default
    def __init__(self, *args, **kwargs):
        self._implicit_VR = True  # start with this by default
    def __del__(self):
        self.close()
    def read_le_tag(self):
        """Read and return two unsigned shorts (little endian) from the file."""
        bytes_read = self.read(4)
        if len(bytes_read) < 4:
            raise EOFError  # needed for reading "next" tag when at end of file
        # Bug fix: struct.unpack takes the format first, then the buffer;
        # the original had the arguments swapped, raising struct.error.
        return unpack(b"<HH", bytes_read)
    def read_be_tag(self):
        """Read and return two unsigned shorts (big endian) from the file."""
        bytes_read = self.read(4)
        if len(bytes_read) < 4:
            raise EOFError  # needed for reading "next" tag when at end of file
        # Bug fix: format argument comes first (was swapped in the original)
        return unpack(b">HH", bytes_read)
    def write_tag(self, tag):
        """Write a dicom tag (two unsigned shorts) to the file."""
        tag = Tag(tag)  # make sure is an instance of class, not just a tuple or int
        self.write_US(tag.group)
        self.write_US(tag.element)
    def read_leUS(self):
        """Return an unsigned short from the file with little endian byte order"""
        return unpack(b"<H", self.read(2))[0]
    def read_beUS(self):
        """Return an unsigned short from the file with big endian byte order"""
        return unpack(b">H", self.read(2))[0]
    def read_leUL(self):
        """Return an unsigned long read with little endian byte order"""
        return unpack(b"<L", self.read(4))[0]
    def read(self, length=None, need_exact_length=True):
        """Reads the required length, returns EOFError if gets less

        If length is None, then read all bytes
        """
        parent_read = self.parent_read  # super(DicomIO, self).read
        if length is None:
            return parent_read()  # get all of it
        bytes_read = parent_read(length)
        if len(bytes_read) < length and need_exact_length:
            # Didn't get all the desired bytes. Keep trying to get the rest.
            # If reading across network, might want to add a delay here
            attempts = 0
            while attempts < self.max_read_attempts and len(bytes_read) < length:
                bytes_read += parent_read(length - len(bytes_read))
                attempts += 1
            if len(bytes_read) < length:
                start_pos = self.tell() - len(bytes_read)
                msg = "Unexpected end of file. "
                msg += "Read {0} bytes of {1} expected starting at position 0x{2:x}".format(len(bytes_read), length, start_pos)
                raise EOFError(msg)
        return bytes_read
    def write_leUS(self, val):
        """Write an unsigned short with little endian byte order"""
        self.write(pack(b"<H", val))
    def write_leUL(self, val):
        """Write an unsigned long with little endian byte order"""
        self.write(pack(b"<L", val))
    def write_beUS(self, val):
        """Write an unsigned short with big endian byte order"""
        self.write(pack(b">H", val))
    def write_beUL(self, val):
        """Write an unsigned long with big endian byte order"""
        self.write(pack(b">L", val))
    write_US = write_leUS  # XXX should we default to this?
    write_UL = write_leUL  # XXX "
    def read_beUL(self):
        """Return an unsigned long read with big endian byte order"""
        return unpack(b">L", self.read(4))[0]
    # Set up properties is_little_endian and is_implicit_VR
    # Big/Little Endian changes functions to read unsigned short or long, e.g. length fields etc
    @property
    def is_little_endian(self):
        return self._little_endian
    @is_little_endian.setter
    def is_little_endian(self, value):
        self._little_endian = value
        if value:  # Little Endian
            self.read_US = self.read_leUS
            self.read_UL = self.read_leUL
            self.write_US = self.write_leUS
            self.write_UL = self.write_leUL
            self.read_tag = self.read_le_tag
        else:  # Big Endian
            self.read_US = self.read_beUS
            self.read_UL = self.read_beUL
            self.write_US = self.write_beUS
            self.write_UL = self.write_beUL
            self.read_tag = self.read_be_tag
    @property
    def is_implicit_VR(self):
        return self._implicit_VR
    @is_implicit_VR.setter
    def is_implicit_VR(self, value):
        self._implicit_VR = value
class DicomFileLike(DicomIO):
    """Wrap an arbitrary file-like object with the DicomIO interface.

    read/seek/tell/close are delegated to the wrapped object; a missing
    write() method is replaced by a stub that raises IOError when used.
    """
    def __init__(self, file_like_obj):
        self.parent = file_like_obj
        self.parent_read = file_like_obj.read
        self.seek = file_like_obj.seek
        self.tell = file_like_obj.tell
        self.close = file_like_obj.close
        self.write = getattr(file_like_obj, "write", self.no_write)
        self.name = getattr(file_like_obj, 'name', '<no filename>')
    def no_write(self, bytes_read):
        """Used for file-like objects where no write is available"""
        raise IOError("This DicomFileLike object has no write() method")
def DicomFile(*args, **kwargs):
    """Open a file (same arguments as open()) wrapped as a DicomFileLike."""
    raw_file = open(*args, **kwargs)
    return DicomFileLike(raw_file)
def DicomBytesIO(*args, **kwargs):
    """Create an in-memory DicomFileLike backed by a BytesIO buffer."""
    buffer_obj = BytesIO(*args, **kwargs)
    return DicomFileLike(buffer_obj)
| Python |
# tag.py
"""Define Tag class to hold a dicom (group, element) tag"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Store the 4 bytes of a dicom tag as an arbitary length integer
# (python "long" in python <3; "int" for python >=3).
# NOTE: This must be not be stored as a tuple internally, as some code logic
# (e.g. in write_AT of filewriter) checks if a value is a multi-value element
# So, represent as a single number and separate to (group, element) when necessary.
def Tag(arg, arg2=None):
"""General function for creating a Tag in any of the standard forms:
e.g. Tag(0x00100010), Tag(0x10,0x10), Tag((0x10, 0x10))
"""
if arg2 is not None:
arg = (arg, arg2) # act as if was passed a single tuple
if isinstance(arg, (tuple, list)):
if len(arg) != 2:
raise ValueError("Tag must be an int or a 2-tuple")
if isinstance(arg[0], (str, unicode)): # py2to3: unicode not needed in py3
if not isinstance(arg[1], (str, unicode)): # py3: ditto
raise ValueError("Both arguments must be hex strings if one is")
arg = (int(arg[0], 16), int(arg[1], 16))
if arg[0] > 0xFFFF or arg[1] > 0xFFFF:
raise OverflowError("Groups and elements of tags must each be <=2 byte integers")
long_value = (arg[0] << 16) | arg[1]
elif isinstance(arg, (str, unicode)): # py2to3: unicode not needed in pure py3
raise ValueError("Tags cannot be instantiated from a single string")
else: # given a single number to use as a tag, as if (group, elem) already joined to a long
long_value = arg
if long_value > 0xFFFFFFFFL:
raise OverflowError("Tags are limited to 32-bit length; tag {0!r}".format(arg))
return BaseTag(long_value)
# py2to3: for some reason, the BaseTag class derived directly from long below
# was not converted by 2to3, but conversion does work with this next line
BaseTag_base_class = long  # converted to "int" by 2to3
class BaseTag(BaseTag_base_class):
    """Class for storing the dicom (group, element) tag.

    Stored internally as a single 32-bit number (group << 16 | element);
    see the Tag() factory function for the accepted construction forms.
    """
    # Override comparisons so can convert "other" to Tag as necessary
    # See Ordering Comparisons at http://docs.python.org/dev/3.0/whatsnew/3.0.html
    def __lt__(self, other):
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except:
                # NOTE(review): bare except converts any failure (including
                # ValueError from Tag) into TypeError -- intentional here
                raise TypeError("Cannot compare Tag with non-Tag item")
        return long(self) < long(other)
    def __eq__(self, other):
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return long(self) == long(other)
    def __ne__(self, other):
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return long(self) != long(other)
    # For python 3, any override of __cmp__ or __eq__ immutable requires
    # explicit redirect of hash function to the parent class
    # See http://docs.python.org/dev/3.0/reference/datamodel.html#object.__hash__
    __hash__ = long.__hash__
    def __str__(self):
        """String of tag value as (gggg, eeee)"""
        return "({0:04x}, {1:04x})".format(self.group, self.element)
    __repr__ = __str__
    @property
    def group(self):
        # upper 16 bits of the combined value
        return self >> 16
    @property
    def element(self):
        """Return the element part of the (group,element) tag"""
        return self & 0xffff
    elem = element  # alternate syntax
    @property
    def is_private(self):
        """Return a boolean to indicate whether the tag is a private tag (odd group number)"""
        return self.group % 2 == 1
def TupleTag(group_elem):
    """Fast factory for BaseTag object with known safe (group, element) tuple"""
    group, elem = group_elem
    return BaseTag((group << 16) | elem)
# Define some special tags:
# See PS 3.5-2008 section 7.5 (p.40)
# These group-0xFFFE tags delimit items and undefined-length sequences.
ItemTag = TupleTag((0xFFFE, 0xE000))  # start of Sequence Item
ItemDelimiterTag = TupleTag((0xFFFE, 0xE00D))  # end of Sequence Item
SequenceDelimiterTag = TupleTag((0xFFFE, 0xE0DD))  # end of Sequence of undefined length
| Python |
# misc.py
"""Miscellaneous helper functions"""
# Copyright (c) 2009 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
_size_factors = dict(KB=1024, MB=1024 * 1024, GB=1024 * 1024 * 1024)
def size_in_bytes(expr):
    """Return the number of bytes for a defer_size argument to read_file().

    expr -- either a number of bytes, or a string such as '512 KB' or '2MB';
            the units KB, MB and GB are recognised case-insensitively.

    Returns an int when expr is a plain number, otherwise a float.
    Raises ValueError if the trailing two-character unit is not recognised.
    """
    try:
        return int(expr)
    except ValueError:
        unit = expr[-2:].upper()
        # membership test directly on the dict (no .keys() needed)
        if unit in _size_factors:
            return float(expr[:-2]) * _size_factors[unit]
        raise ValueError("Unable to parse length with unit '{0:s}'".format(unit))
| Python |
# filereader.py
"""Read a dicom media file"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import absolute_import
# Need zlib and io.BytesIO for deflate-compressed file
import os.path
import warnings
import zlib
from io import BytesIO
import logging
from dicom.tag import TupleTag
from dicom.dataelem import RawDataElement
from dicom.util.hexutil import bytes2hex
from dicom.valuerep import extra_length_VRs
from dicom.charset import default_encoding
from dicom import in_py3
logger = logging.getLogger('pydicom')
stat_available = True
try:
from os import stat
except:
stat_available = False
from os import SEEK_CUR
import dicom.UID # for Implicit/Explicit/Little/Big Endian transfer syntax UIDs
from dicom.filebase import DicomFile, DicomFileLike
from dicom.filebase import DicomIO, DicomBytesIO
from dicom.dataset import Dataset, FileDataset
from dicom.datadict import dictionaryVR
from dicom.dataelem import DataElement, DeferredDataElement
from dicom.tag import Tag, ItemTag, ItemDelimiterTag, SequenceDelimiterTag
from dicom.sequence import Sequence
from dicom.misc import size_in_bytes
from dicom.fileutil import absorb_delimiter_item, read_undefined_length_value
from dicom.fileutil import length_of_undefined_length
from struct import Struct, unpack
from sys import byteorder
sys_is_little_endian = (byteorder == 'little')
class InvalidDicomError(Exception):
    """Exception raised when a file does not appear to be valid DICOM.

    This is the case when the four characters "DICM" are not present at
    position 128 in the file. (According to the dicom specification, each
    dicom file should have this.)

    To force reading the file (because maybe it is a dicom file without
    a header), use read_file(..., force=True).
    """
    def __init__(self, *args):
        # supply a sensible default message when none is given
        if not args:
            args = ('The specified file is not a valid DICOM file.',)
        Exception.__init__(self, *args)
class DicomIter(object):
    """Iterator over DICOM data elements created from a file-like object
    """
    def __init__(self, fp, stop_when=None, force=False):
        """Read the preamble and meta info, prepare iterator for remainder

        fp -- an open DicomFileLike object, at start of file
        stop_when -- passed to data_element_generator to end iteration early
        force -- passed to read_preamble; read even without a "DICM" marker
        Adds flags to fp: Big/Little-endian and Implicit/Explicit VR
        """
        self.fp = fp
        self.stop_when = stop_when
        self.preamble = preamble = read_preamble(fp, force)
        self.has_header = has_header = (preamble is not None)
        self.file_meta_info = Dataset()
        if has_header:
            self.file_meta_info = file_meta_info = _read_file_meta_info(fp)
            transfer_syntax = file_meta_info.TransferSyntaxUID
            # select decoding flags based on the declared transfer syntax
            if transfer_syntax == dicom.UID.ExplicitVRLittleEndian:
                self._is_implicit_VR = False
                self._is_little_endian = True
            elif transfer_syntax == dicom.UID.ImplicitVRLittleEndian:
                self._is_implicit_VR = True
                self._is_little_endian = True
            elif transfer_syntax == dicom.UID.ExplicitVRBigEndian:
                self._is_implicit_VR = False
                self._is_little_endian = False
            elif transfer_syntax == dicom.UID.DeflatedExplicitVRLittleEndian:
                # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
                # following the file metadata was prepared the normal way,
                # then "deflate" compression applied.
                # All that is needed here is to decompress and then
                # use as normal in a file-like object
                zipped = fp.read()
                # -MAX_WBITS part is from comp.lang.python answer:
                # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
                unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
                fp = BytesIO(unzipped)  # a file-like object
                self.fp = fp  # point to new object
                self._is_implicit_VR = False
                self._is_little_endian = True
            else:
                # Any other syntax should be Explicit VR Little Endian,
                # e.g. all Encapsulated (JPEG etc) are ExplVR-LE
                # by Standard PS 3.5-2008 A.4 (p63)
                self._is_implicit_VR = False
                self._is_little_endian = True
        else:  # no header -- make assumptions
            # NOTE(review): this sets the attribute on fp rather than on
            # self.file_meta_info -- looks suspicious; confirm callers rely on it
            fp.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
            self._is_little_endian = True
            self._is_implicit_VR = True
        impl_expl = ("Explicit", "Implicit")[self._is_implicit_VR]
        big_little = ("Big", "Little")[self._is_little_endian]
        logger.debug("Using {0:s} VR, {1:s} Endian transfer syntax".format(
            impl_expl, big_little))
    def __iter__(self):
        # yield the file meta elements first (in tag order), then the dataset
        tags = sorted(self.file_meta_info.keys())
        for tag in tags:
            yield self.file_meta_info[tag]
        for data_element in data_element_generator(self.fp,
                self._is_implicit_VR, self._is_little_endian,
                stop_when=self.stop_when):
            yield data_element
def data_element_generator(fp, is_implicit_VR, is_little_endian,
                           stop_when=None, defer_size=None):
    """Create a generator to efficiently return the raw data elements
    Returns (VR, length, raw_bytes, value_tell, is_little_endian),
    where:
    VR -- None if implicit VR, otherwise the VR read from the file
    length -- the length as in the DICOM data element (could be
        DICOM "undefined length" 0xffffffffL),
    value_bytes -- the raw bytes from the DICOM file
        (not parsed into python types)
    is_little_endian -- True if transfer syntax is little endian; else False

    :param fp: an opened file-like object with read/tell/seek
    :param stop_when: optional callable (tag, VR, length) -> bool; when it
        returns True, reading stops with the file rewound to the start of
        that element
    :param defer_size: if a defined length exceeds this, the value is
        skipped and yielded as None (to be re-read later on demand)
    """
    # NOTE(review): this generator signals exhaustion with
    # `raise StopIteration`, a Python 2 idiom; under PEP 479 (Python 3.7+)
    # that becomes a RuntimeError -- would need `return` instead.
    #
    # Summary of DICOM standard PS3.5-2008 chapter 7:
    # If Implicit VR, data element is:
    #    tag, 4-byte length, value.
    #    The 4-byte length can be FFFFFFFF (undefined length)*
    # If Explicit VR:
    #    if OB, OW, OF, SQ, UN, or UT:
    #       tag, VR, 2-bytes reserved (both zero), 4-byte length, value
    #       For all but UT, the length can be FFFFFFFF (undefined length)*
    #   else: (any other VR)
    #       tag, VR, (2 byte length), value
    # * for undefined length, a Sequence Delimitation Item marks the end
    #       of the Value Field.
    # Note, except for the special_VRs, both impl and expl VR use 8 bytes;
    #    the special VRs follow the 8 bytes with a 4-byte length

    # With a generator, state is stored, so we can break down
    #    into the individual cases, and not have to check them again for each
    #    data element
    if is_little_endian:
        endian_chr = "<"
    else:
        endian_chr = ">"
    if is_implicit_VR:
        element_struct = Struct(endian_chr + "HHL")
    else:  # Explicit VR
        # tag, VR, 2-byte length (or 0 if special VRs)
        element_struct = Struct(endian_chr + "HH2sH")
        extra_length_struct = Struct(endian_chr + "L")  # for special VRs
        extra_length_unpack = extra_length_struct.unpack  # for lookup speed

    # Make local variables so have faster lookup
    fp_read = fp.read
    fp_tell = fp.tell
    logger_debug = logger.debug
    debugging = dicom.debugging
    element_struct_unpack = element_struct.unpack

    while True:
        # Read tag, VR, length, get ready to read value
        bytes_read = fp_read(8)
        if len(bytes_read) < 8:
            raise StopIteration  # at end of file
        if debugging:
            debug_msg = "{0:08x}: {1}".format(fp.tell() - 8,
                                              bytes2hex(bytes_read))

        if is_implicit_VR:
            # must reset VR each time; could have set last iteration (e.g. SQ)
            VR = None
            group, elem, length = element_struct_unpack(bytes_read)
        else:  # explicit VR
            group, elem, VR, length = element_struct_unpack(bytes_read)
            if in_py3:
                VR = VR.decode(default_encoding)
            if VR in extra_length_VRs:
                # special VRs carry a 4-byte length after 2 reserved bytes
                bytes_read = fp_read(4)
                length = extra_length_unpack(bytes_read)[0]
                if debugging:
                    debug_msg += " " + bytes2hex(bytes_read)
        if debugging:
            debug_msg = "%-47s (%04x, %04x)" % (debug_msg, group, elem)
            if not is_implicit_VR:
                debug_msg += " %s " % VR
            if length != 0xFFFFFFFFL:
                debug_msg += "Length: %d" % length
            else:
                debug_msg += "Length: Undefined length (FFFFFFFF)"
            logger_debug(debug_msg)

        # Positioned to read the value, but may not want to -- check stop_when
        value_tell = fp_tell()
        tag = TupleTag((group, elem))
        if stop_when is not None:
            # XXX VR may be None here!! Should stop_when just take tag?
            if stop_when(tag, VR, length):
                if debugging:
                    logger_debug("Reading ended by stop_when callback. "
                                 "Rewinding to start of data element.")
                rewind_length = 8
                if not is_implicit_VR and VR in extra_length_VRs:
                    rewind_length += 4
                fp.seek(value_tell - rewind_length)
                raise StopIteration

        # Reading the value
        # First case (most common): reading a value with a defined length
        if length != 0xFFFFFFFFL:
            if defer_size is not None and length > defer_size:
                # Flag as deferred by setting value to None, and skip bytes
                value = None
                logger_debug("Defer size exceeded."
                             "Skipping forward to next data element.")
                fp.seek(fp_tell() + length)
            else:
                value = fp_read(length)
                if debugging:
                    dotdot = " "
                    if length > 12:
                        dotdot = "..."
                    logger_debug("%08x: %-34s %s %r %s" % (value_tell,
                                 bytes2hex(value[:12]), dotdot, value[:12], dotdot))
            yield RawDataElement(tag, VR, length, value, value_tell,
                                 is_implicit_VR, is_little_endian)

        # Second case: undefined length - must seek to delimiter,
        # unless is SQ type, in which case is easier to parse it, because
        # undefined length SQs and items of undefined lengths can be nested
        # and it would be error-prone to read to the correct outer delimiter
        else:
            # Try to look up type to see if is a SQ
            # if private tag, won't be able to look it up in dictionary,
            # in which case just ignore it and read the bytes unless it is
            # identified as a Sequence
            if VR is None:
                try:
                    VR = dictionaryVR(tag)
                except KeyError:
                    # Look ahead to see if it consists of items and is thus a SQ
                    next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
                    # Rewind the file
                    fp.seek(fp_tell() - 4)
                    if next_tag == ItemTag:
                        VR = 'SQ'
            if VR == 'SQ':
                if debugging:
                    msg = "{0:08x}: Reading/parsing undefined length sequence"
                    logger_debug(msg.format(fp_tell()))
                # sequences are parsed recursively rather than skipped
                seq = read_sequence(fp, is_implicit_VR,
                                    is_little_endian, length)
                yield DataElement(tag, VR, seq, value_tell,
                                  is_undefined_length=True)
            else:
                delimiter = SequenceDelimiterTag
                if debugging:
                    logger_debug("Reading undefined length data element")
                value = read_undefined_length_value(fp, is_little_endian,
                                                    delimiter, defer_size)
                yield RawDataElement(tag, VR, length, value, value_tell,
                                     is_implicit_VR, is_little_endian)
def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
                 stop_when=None, defer_size=None):
    """Return a Dataset instance containing the next dataset in the file.

    :param fp: an opened file object
    :param is_implicit_VR: True if file transfer syntax is implicit VR
    :param is_little_endian: True if file has little endian transfer syntax
    :param bytelength: None to read until end of file or ItemDelimiterTag,
        else a fixed number of bytes to read
    :param stop_when: optional call_back function which can terminate
        reading; see help for data_element_generator for details
    :param defer_size: optional size to avoid loading large elements in
        memory; see help for data_element_generator for details
    :returns: a Dataset instance
    """
    collected = {}
    start_pos = fp.tell()
    element_source = data_element_generator(fp, is_implicit_VR,
                                            is_little_endian,
                                            stop_when, defer_size)
    try:
        while bytelength is None or fp.tell() - start_pos < bytelength:
            raw_elem = next(element_source)
            tag = raw_elem.tag
            # An ItemDelimiterTag means this dataset is an item inside a
            # sequence and we have reached its end.
            if tag == (0xFFFE, 0xE00D):
                break
            collected[tag] = raw_elem
    except StopIteration:
        pass  # end of file, or stop_when callback fired
    except EOFError as details:
        # XXX is this error visible enough to user code with just logging?
        logger.error(str(details) + " in file " +
                     getattr(fp, "name", "<no filename>"))
    except NotImplementedError as details:
        logger.error(details)
    return Dataset(collected)
def read_sequence(fp, is_implicit_VR, is_little_endian, bytelength, offset=0):
    """Read and return a Sequence -- i.e. a list of Datasets.

    :param fp: an opened file object positioned at the sequence value
    :param bytelength: length in bytes of the sequence value, or
        0xffffffff for undefined length (read until Sequence Delimiter)
    :param offset: added to each item's recorded file_tell position
    :returns: a Sequence instance, with is_undefined_length attribute set
    """
    seq = []  # use builtin list to start for speed, convert to Sequence at end
    is_undefined_length = False
    if bytelength != 0:  # SQ of length 0 possible (PS 3.5-2008 7.5.1a (p.40)
        if bytelength == 0xffffffffL:
            is_undefined_length = True
            bytelength = None  # loop below then runs until delimiter found
        fp_tell = fp.tell  # for speed in loop
        fpStart = fp_tell()
        while (not bytelength) or (fp_tell() - fpStart < bytelength):
            file_tell = fp.tell()
            dataset = read_sequence_item(fp, is_implicit_VR, is_little_endian)
            if dataset is None:  # None is returned if hit Sequence Delimiter
                break
            # remember where this item started (offset for deflated files)
            dataset.file_tell = file_tell + offset
            seq.append(dataset)
    seq = Sequence(seq)
    seq.is_undefined_length = is_undefined_length
    return seq
def read_sequence_item(fp, is_implicit_VR, is_little_endian):
"""Read and return a single sequence item, i.e. a Dataset"""
if is_little_endian:
tag_length_format = "<HHL"
else:
tag_length_format = ">HHL"
try:
bytes_read = fp.read(8)
group, element, length = unpack(tag_length_format, bytes_read)
except:
raise IOError("No tag to read at file position "
"{0:05x}".format(fp.tell()))
tag = (group, element)
if tag == SequenceDelimiterTag: # No more items, time to stop reading
data_element = DataElement(tag, None, None, fp.tell() - 4)
logger.debug("{0:08x}: {1}".format(fp.tell() - 8, "End of Sequence"))
if length != 0:
logger.warning("Expected 0x00000000 after delimiter, found 0x%x,"
" at position 0x%x" % (length, fp.tell() - 4))
return None
if tag != ItemTag:
logger.warning("Expected sequence item with tag %s at file position "
"0x%x" % (ItemTag, fp.tell() - 4))
else:
logger.debug("{0:08x}: {1} Found Item tag (start of item)".format(
fp.tell() - 4, bytes2hex(bytes_read)))
is_undefined_length = False
if length == 0xFFFFFFFFL:
ds = read_dataset(fp, is_implicit_VR, is_little_endian,
bytelength=None)
ds.is_undefined_length_sequence_item = True
else:
ds = read_dataset(fp, is_implicit_VR, is_little_endian, length)
logger.debug("%08x: Finished sequence item" % fp.tell())
return ds
def not_group2(tag, VR, length):
    """stop_when callback: True once a tag outside group 2 (file meta
    information) is reached.
    """
    return tag.group != 2
def _read_file_meta_info(fp):
    """Return the file meta information.
    fp must be set after the 128 byte preamble and 'DICM' marker

    :returns: a Dataset of the group 2 (file meta) data elements
    """
    # File meta info always LittleEndian, Explicit VR. After will change these
    #    to the transfer syntax values set in the meta info

    # Get group length data element, whose value is the length of the meta_info
    fp_save = fp.tell()  # in case need to rewind
    debugging = dicom.debugging
    if debugging:
        logger.debug("Try to read group length info...")
    bytes_read = fp.read(8)
    # file meta is always explicit VR little endian: tag, VR, 2-byte length
    group, elem, VR, length = unpack("<HH2sH", bytes_read)
    if debugging:
        debug_msg = "{0:08x}: {1}".format(fp.tell() - 8, bytes2hex(bytes_read))
    if in_py3:
        VR = VR.decode(default_encoding)
    if VR in extra_length_VRs:
        # special VRs carry a 4-byte length after the 2 reserved bytes
        bytes_read = fp.read(4)
        length = unpack("<L", bytes_read)[0]
        if debugging:
            debug_msg += " " + bytes2hex(bytes_read)
    if debugging:
        debug_msg = "{0:<47s} ({1:04x}, {2:04x}) {3:2s} Length: {4:d}".format(
            debug_msg, group, elem, VR, length)
        logger.debug(debug_msg)

    # Store meta group length if it exists, then read until not group 2
    if group == 2 and elem == 0:
        bytes_read = fp.read(length)
        if debugging:
            logger.debug("{0:08x}: {1}".format(fp.tell() - length,
                                               bytes2hex(bytes_read)))
        group_length = unpack("<L", bytes_read)[0]
        expected_ds_start = fp.tell() + group_length
        if debugging:
            msg = "value (group length) = {0:d}".format(group_length)
            msg += " regular dataset should start at {0:08x}".format(
                expected_ds_start)
            logger.debug(" " * 10 + msg)
    else:
        expected_ds_start = None
        if debugging:
            logger.debug(" " * 10 + "(0002,0000) Group length not found.")

    # Changed in pydicom 0.9.7 -- don't trust the group length, just read
    #    until no longer group 2 data elements. But check the length and
    #    give a warning if group 2 ends at different location.
    # Rewind to read the first data element as part of the file_meta dataset
    if debugging:
        logger.debug("Rewinding and reading whole dataset "
                     "including this first data element")
    fp.seek(fp_save)
    file_meta = read_dataset(fp, is_implicit_VR=False,
                             is_little_endian=True, stop_when=not_group2)
    fp_now = fp.tell()
    if expected_ds_start and fp_now != expected_ds_start:
        logger.info("*** Group length for file meta dataset "
                    "did not match end of group 2 data ***")
    else:
        if debugging:
            logger.debug("--- End of file meta data found "
                         "as expected ---------")
    return file_meta
def read_file_meta_info(filename):
    """Read and return the DICOM file meta information only.

    This function is meant to be used in user code, for quickly going
    through a series of files to find one which is referenced to a
    particular SOP, without having to read the entire files.

    :param filename: path to the DICOM file
    :returns: a Dataset of the group 2 (file meta) data elements
    :raises InvalidDicomError: if no DICM header is present
    """
    fp = DicomFile(filename, 'rb')
    try:
        read_preamble(fp, False)  # if no header, raise exception
        return _read_file_meta_info(fp)
    finally:
        # the original version leaked the file handle; always close it
        fp.close()
def read_preamble(fp, force):
    """Read and return the 128-byte DICOM preamble, consuming the 'DICM'
    marker that follows it.

    If 'DICM' is absent: with force=True, assume there is no preamble,
    rewind the file to the beginning and return None; otherwise raise
    InvalidDicomError.
    """
    logger.debug("Reading preamble...")
    preamble = fp.read(0x80)
    if dicom.debugging:
        sample = bytes2hex(preamble[:8]) + "..." + bytes2hex(preamble[-8:])
        logger.debug("{0:08x}: {1}".format(fp.tell() - 0x80, sample))
    magic = fp.read(4)
    if magic == b"DICM":
        logger.debug("{0:08x}: 'DICM' marker found".format(fp.tell() - 4))
        return preamble
    # No marker: either tolerate it (force) or refuse the file
    if not force:
        raise InvalidDicomError("File is missing 'DICM' marker. "
                                "Use force=True to force reading")
    logger.info("File is not a standard DICOM file; 'DICM' header is "
                "missing. Assuming no header and continuing")
    fp.seek(0)
    return None
def _at_pixel_data(tag, VR, length):
return tag == (0x7fe0, 0x0010)
def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
    """Parse a DICOM file until a condition is met.

    ``read_partial`` is normally not called directly. Use ``read_file``
    instead, unless you need to stop on some condition other than
    reaching pixel data.

    :arg fileobj: a file-like object. This function does not close it.
    :arg stop_when: a callable which takes tag, VR, length, and returns
        True or False. If stop_when returns True, read_data_element will
        raise StopIteration. If None (default), then the whole file is read.
    :returns: a FileDataset instance
    """
    # Read preamble -- raise an exception if missing and force=False
    preamble = read_preamble(fileobj, force)
    file_meta_dataset = Dataset()
    # Assume a transfer syntax, correct it as necessary
    is_implicit_VR = True
    is_little_endian = True
    if preamble:
        file_meta_dataset = _read_file_meta_info(fileobj)
        transfer_syntax = file_meta_dataset.TransferSyntaxUID
        if transfer_syntax == dicom.UID.ImplicitVRLittleEndian:
            pass
        elif transfer_syntax == dicom.UID.ExplicitVRLittleEndian:
            is_implicit_VR = False
        elif transfer_syntax == dicom.UID.ExplicitVRBigEndian:
            is_implicit_VR = False
            is_little_endian = False
        elif transfer_syntax == dicom.UID.DeflatedExplicitVRLittleEndian:
            # See PS3.6-2008 A.5 (p 71): when written, the entire dataset
            # following the file metadata was prepared the normal way, then
            # "deflate" compression applied. Decompress here and continue
            # on the result as a normal file-like object.
            zipped = fileobj.read()
            # -MAX_WBITS part is from comp.lang.python answer:
            # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
            unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
            fileobj = BytesIO(unzipped)  # a file-like object
            is_implicit_VR = False
        else:
            # Any other syntax should be Explicit VR Little Endian,
            # e.g. all Encapsulated (JPEG etc) are ExplVR-LE
            # by Standard PS 3.5-2008 A.4 (p63)
            is_implicit_VR = False
    else:  # no header -- use the is_little_endian, implicit assumptions
        file_meta_dataset.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
    # BUG FIX: `dataset` was unbound (NameError at the return below) if
    # read_dataset raised EOFError; pre-bind an empty Dataset as fallback.
    dataset = Dataset()
    try:
        dataset = read_dataset(fileobj, is_implicit_VR, is_little_endian,
                               stop_when=stop_when, defer_size=defer_size)
    except EOFError:
        pass  # error already logged in read_dataset
    return FileDataset(fileobj, dataset, preamble, file_meta_dataset,
                       is_implicit_VR, is_little_endian)
def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
    """Read and parse a DICOM file.

    :param fp: either a file-like object, or a string containing the file
        name. If a file-like object, the caller is responsible for closing
        it.
    :param defer_size: if a data element value is larger than defer_size,
        then the value is not read into memory until it is accessed in
        code. Default None means all elements read into memory.
    :param stop_before_pixels: Set True to stop before reading pixels
        (and anything after them). If False (default), the full file will
        be read and parsed.
    :param force: Set to True to force reading even if no header is found.
        If False, a dicom.filereader.InvalidDicomError is raised when the
        file is not valid DICOM.
    :returns: a FileDataset instance
    """
    # Open file if not already a file object
    caller_owns_file = True
    if isinstance(fp, basestring):
        # caller provided a file name; we own the file handle
        caller_owns_file = False
        logger.debug("Reading file '{0}'".format(fp))
        fp = open(fp, 'rb')
    if dicom.debugging:
        logger.debug("\n" + "-" * 80)
        logger.debug("Call to read_file()")
        msg = ("filename:'%s', defer_size='%s'"
               ", stop_before_pixels=%s, force=%s")
        # BUG FIX: file-like objects (e.g. BytesIO) need not have a
        # 'name' attribute; fp.name crashed the debug path for them
        logger.debug(msg % (getattr(fp, 'name', '<no filename>'),
                            defer_size, stop_before_pixels, force))
        if caller_owns_file:
            logger.debug("Caller passed file object")
        else:
            logger.debug("Caller passed file name")
        logger.debug("-" * 80)
    # Iterate through all items and store them --include file meta if present
    stop_when = None
    if stop_before_pixels:
        stop_when = _at_pixel_data
    try:
        dataset = read_partial(fp, stop_when, defer_size=defer_size,
                               force=force)
    finally:
        if not caller_owns_file:
            fp.close()
    # XXX need to store transfer syntax etc.
    return dataset
def data_element_offset_to_value(is_implicit_VR, VR):
    """Return number of bytes from start of data element to start of value."""
    if is_implicit_VR:
        return 8  # tag (4 bytes) + 4-byte length
    if VR in extra_length_VRs:
        return 12  # tag (4) + VR (2) + 2 reserved + 4-byte length
    return 8  # tag (4) + VR (2) + 2-byte length
def read_deferred_data_element(fileobj_type, filename, timestamp,
                               raw_data_elem):
    """Read the previously deferred value from the file into memory
    and return a raw data element.

    :param fileobj_type: factory used to re-open the file (e.g. DicomFile)
    :param filename: path the dataset was originally read from
    :param timestamp: st_mtime recorded at the original read, or None
    :param raw_data_elem: the deferred RawDataElement (value is None)
    :returns: the re-read raw data element with its value populated
    :raises IOError: if the original file name is unknown or missing
    :raises ValueError: if the re-read element's tag or VR differs
    """
    logger.debug("Reading deferred element %r" % str(raw_data_elem.tag))
    # If it wasn't read from a file, then we cannot re-read the value
    if filename is None:
        raise IOError("Deferred read -- original filename not stored. "
                      "Cannot re-open")
    # Check that the file is the same as when originally read
    if not os.path.exists(filename):
        raise IOError("Deferred read -- original file "
                      "{0:s} is missing".format(filename))
    if stat_available and (timestamp is not None):
        statinfo = stat(filename)
        if statinfo.st_mtime != timestamp:
            warnings.warn("Deferred read warning -- file modification time "
                          "has changed.")
    # Open the file and position to the start of the data element.
    # BUG FIX: the handle was leaked if anything below raised; the
    # try/finally guarantees it is closed.
    fp = fileobj_type(filename, 'rb')
    try:
        is_implicit_VR = raw_data_elem.is_implicit_VR
        is_little_endian = raw_data_elem.is_little_endian
        offset = data_element_offset_to_value(is_implicit_VR,
                                              raw_data_elem.VR)
        fp.seek(raw_data_elem.value_tell - offset)
        elem_gen = data_element_generator(fp, is_implicit_VR,
                                          is_little_endian,
                                          defer_size=None)
        # Read the data element and check matches what was stored before
        data_elem = next(elem_gen)
    finally:
        fp.close()
    if data_elem.VR != raw_data_elem.VR:
        raise ValueError("Deferred read VR {0:s} does not match "
                         "original {1:s}".format(data_elem.VR,
                                                 raw_data_elem.VR))
    if data_elem.tag != raw_data_elem.tag:
        raise ValueError("Deferred read tag {0!r} does not match "
                         "original {1!r}".format(data_elem.tag,
                                                 raw_data_elem.tag))
    # Everything is ok, now this object should act like usual DataElement
    return data_elem
| Python |
# fileutil.py
"""Functions for reading to certain bytes, e.g. delimiters"""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import pack, unpack
from dicom.tag import TupleTag, Tag
from dicom.datadict import dictionary_description
import logging
logger = logging.getLogger('pydicom')
def absorb_delimiter_item(fp, is_little_endian, delimiter):
    """Read (and ignore) undefined length sequence or item terminators.

    :param fp: file-like object positioned at an expected delimiter
    :param is_little_endian: True for little endian transfer syntax
    :param delimiter: the tag expected; if a different tag is found the
        file position is rewound by 8 bytes and a warning is logged
    """
    if is_little_endian:
        struct_format = "<HHL"
    else:
        struct_format = ">HHL"
    group, elem, length = unpack(struct_format, fp.read(8))
    tag = TupleTag((group, elem))
    if tag != delimiter:
        msg = "Did not find expected delimiter '%s'" % dictionary_description(delimiter)
        msg += ", instead found %s at file position 0x%x" % (str(tag), fp.tell() - 8)
        # logger.warn is deprecated; logger.warning is the supported name
        logger.warning(msg)
        fp.seek(fp.tell() - 8)
        return
    logger.debug("%04x: Found Delimiter '%s'", fp.tell() - 8, dictionary_description(delimiter))
    # the delimiter item should always carry a zero length
    if length == 0:
        logger.debug("%04x: Read 0 bytes after delimiter", fp.tell() - 4)
    else:
        logger.debug("%04x: Expected 0x00000000 after delimiter, found 0x%x", fp.tell() - 4, length)
def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
    """Scan forward through the file for a specific byte sequence.

    bytes_to_find -- a string containing the bytes to find. Must be in
        correct endian order already.
    read_size -- number of bytes to read at a time.
    rewind -- if True, restore the starting file position before returning;
        otherwise leave the position just past the found sequence.

    Returns the absolute position of the sequence, or None if end of file
    is reached without finding it (position restored in that case).
    """
    start_pos = fp.tell()
    # overlap window so a match straddling two chunks is not missed
    overlap = len(bytes_to_find) - 1
    at_eof = False
    while True:
        chunk_pos = fp.tell()
        chunk = fp.read(read_size)
        if len(chunk) < read_size:
            # try once more; if still short, this is the final block
            chunk += fp.read(read_size - len(chunk))
            if len(chunk) < read_size:
                at_eof = True  # still check whatever we did get
        hit = chunk.find(bytes_to_find)
        if hit != -1:
            break
        if at_eof:
            if rewind:
                fp.seek(start_pos)
            return None
        # back up in case the delimiter crossed the read_size boundary
        fp.seek(fp.tell() - overlap)
    found_at = chunk_pos + hit
    if rewind:
        fp.seek(start_pos)
    else:
        fp.seek(found_at + len(bytes_to_find))
    return found_at
def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=None,
                                read_size=128):
    """Read until the delimiter tag is found and return the value; the
    delimiter itself and its 4-byte (zero) length are consumed but not
    returned.

    fp -- a file-like object with read(), seek() functions
    is_little_endian -- True if transfer syntax is little endian, else False
    delimiter_tag -- tag (with .group/.elem) marking the end of the value
    defer_size -- if not None and the value is at least this long, return
        None instead of the bytes (deferred read)
    read_size -- number of bytes to read at one time (default 128)

    On completion, the file is positioned at the first byte after the
    delimiter and its following four zero bytes.
    If end-of-file is hit before the delimiter is found, raises EOFError
    (with the file rewound to the start of the value).
    """
    data_start = fp.tell()
    search_rewind = 3  # delimiter is 4 bytes; keep 3 in case it straddles chunks
    if is_little_endian:
        bytes_format = b"<HH"
    else:
        bytes_format = b">HH"
    bytes_to_find = pack(bytes_format, delimiter_tag.group, delimiter_tag.elem)
    found = False
    EOF = False
    value_chunks = []
    byte_count = 0  # for defer_size checks
    while not found:
        chunk_start = fp.tell()
        bytes_read = fp.read(read_size)
        if len(bytes_read) < read_size:
            # try again - if still don't get required amount, this is last block
            new_bytes = fp.read(read_size - len(bytes_read))
            bytes_read += new_bytes
            if len(bytes_read) < read_size:
                EOF = True  # but will still check whatever we did get
        index = bytes_read.find(bytes_to_find)
        if index != -1:
            found = True
            new_bytes = bytes_read[:index]
            byte_count += len(new_bytes)
            if defer_size is None or byte_count < defer_size:
                value_chunks.append(new_bytes)
            fp.seek(chunk_start + index + 4)  # position past the delimiter tag
            length = fp.read(4)
            if length != b"\0\0\0\0":
                msg = "Expected 4 zero bytes after undefined length delimiter at pos {0:04x}"
                logger.error(msg.format(fp.tell() - 4))
        elif EOF:
            fp.seek(data_start)
            raise EOFError("End of file reached before delimiter {0!r} found".format(delimiter_tag))
        else:
            fp.seek(fp.tell() - search_rewind)  # rewind a bit in case delimiter crossed read_size boundary
            # accumulate the bytes read (not including the rewind)
            new_bytes = bytes_read[:-search_rewind]
            byte_count += len(new_bytes)
            if defer_size is None or byte_count < defer_size:
                value_chunks.append(new_bytes)
    # BUG FIX: was `defer_size >= defer_size`, which compared the parameter
    # with itself and therefore deferred (returned None) for ANY non-None
    # defer_size; the intended comparison is against the accumulated size.
    if defer_size is not None and byte_count >= defer_size:
        return None
    else:
        return b"".join(value_chunks)
def find_delimiter(fp, delimiter, is_little_endian, read_size=128, rewind=True):
    """Return file position where 4-byte delimiter is located.

    Return None if reach end of file without finding the delimiter.
    On return, file position will be where it was before this function,
    unless rewind argument is False.
    """
    struct_format = "<H"
    if not is_little_endian:
        struct_format = ">H"
    delimiter = Tag(delimiter)
    bytes_to_find = pack(struct_format, delimiter.group) + pack(struct_format, delimiter.elem)
    # BUG FIX: read_size was accepted but never forwarded to find_bytes,
    # so callers could not actually tune the chunk size
    return find_bytes(fp, bytes_to_find, read_size=read_size, rewind=rewind)
def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128, rewind=True):
    """Search through the file to find the delimiter and return the length
    of the data element.

    Note the data element that the delimiter starts is not read here; the
    calling routine must handle that.

    delimiter -- must be 4 bytes long
    rewind -- if True, file will be returned to position before seeking
        the bytes

    :returns: number of bytes from the current position to the delimiter
    """
    data_start = fp.tell()
    # removed unused local `chunk`; forward read_size (was silently ignored)
    delimiter_pos = find_delimiter(fp, delimiter, is_little_endian,
                                   read_size=read_size, rewind=rewind)
    # NOTE(review): if the delimiter is never found, find_delimiter returns
    # None and the subtraction raises TypeError -- confirm callers guarantee
    # a delimiter exists before relying on this
    return delimiter_pos - data_start
def read_delimiter_item(fp, delimiter):
    """Read and ignore an expected delimiter.

    If the delimiter is not found or is not correctly formed, a warning
    is logged.
    """
    found = fp.read(4)
    if found != delimiter:
        # fixed "delimitor" typo in the message; logger.warn is deprecated
        logger.warning("Expected delimiter %s, got %s at file position 0x%x",
                       Tag(delimiter), Tag(found), fp.tell() - 4)
    length = fp.read_UL()
    if length != 0:
        logger.warning("Expected delimiter item to have length 0, "
                       "got %d at file position 0x%x", length, fp.tell() - 4)
| Python |
# dataset.py
"""Module for Dataset class
Overview of Dicom object model:
Dataset(derived class of Python's dict class)
contains DataElement instances (DataElement is a class with tag, VR, value)
the value can be a Sequence instance
(Sequence is derived from Python's list),
or just a regular value like a number, string, etc.,
or a list of regular values, e.g. a 3d coordinate
Sequence's are a list of Datasets (note recursive nature here)
"""
#
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
import sys
from sys import byteorder
sys_is_little_endian = (byteorder == 'little')
import logging
logger = logging.getLogger('pydicom')
import inspect # for __dir__
from dicom.datadict import DicomDictionary, dictionaryVR
from dicom.datadict import tag_for_name, all_names_for_tag
from dicom.tag import Tag, BaseTag
from dicom.dataelem import DataElement, DataElement_from_raw, RawDataElement
from dicom.UID import NotCompressedPixelTransferSyntaxes
import os.path
import io
import dicom # for write_file
import dicom.charset
import warnings
# Optional-dependency availability flags, probed once at import time.
have_numpy = True
try:
    import numpy
except ImportError:
    # was a bare `except:`; only a missing module should disable numpy
    have_numpy = False
stat_available = True
try:
    from os import stat
except ImportError:
    stat_available = False
class PropertyError(Exception):
    """Raised for AttributeErrors caught inside a property, so that they
    are not routed into __getattr__.
    http://docs.python.org/release/3.1.3/tutorial/errors.html#tut-userexceptions
    """
    pass
class Dataset(dict):
"""A collection (dictionary) of Dicom `DataElement` instances.
Example of two ways to retrieve or set values:
1. dataset[0x10, 0x10].value --> patient's name
2. dataset.PatientName --> patient's name
Example (2) uses DICOM "keywords", defined starting in 2011 standard.
PatientName is not actually a member of the object, but unknown member
requests are checked against the DICOM dictionary. If the name matches a
DicomDictionary descriptive string, the corresponding tag is used
to look up or set the `DataElement` instance's value.
:attribute indent_chars: for string display, the characters used to indent
nested Data Elements (e.g. sequence items). Default is three spaces.
"""
indent_chars = " "
def add(self, data_element):
"""Equivalent to dataset[data_element.tag] = data_element."""
self[data_element.tag] = data_element
def add_new(self, tag, VR, value):
"""Create a new DataElement instance and add it to this Dataset."""
data_element = DataElement(tag, VR, value)
# use data_element.tag since DataElement verified it
self[data_element.tag] = data_element
def data_element(self, name):
"""Return the full data_element instance for the given descriptive name
:param name: a DICOM keyword
:returns: a DataElement instance in this dataset with the given name
if the tag for that name is not found, returns None
"""
tag = tag_for_name(name)
if tag:
return self[tag]
return None
def __contains__(self, name):
"""Extend dict.__contains__() to handle DICOM keywords.
This is called for code like: ``if 'SliceLocation' in dataset``.
"""
if isinstance(name, (str, unicode)):
tag = tag_for_name(name)
else:
try:
tag = Tag(name)
except:
return False
if tag:
return dict.__contains__(self, tag)
else:
return dict.__contains__(self, name) # will no doubt raise an exception
def decode(self):
"""Apply character set decoding to all data elements.
See DICOM PS3.5-2008 6.1.1.
"""
# Find specific character set. 'ISO_IR 6' is default
# May be multi-valued, but let dicom.charset handle all logic on that
dicom_character_set = self.get('SpecificCharacterSet', "ISO_IR 6")
# Shortcut to the decode function in dicom.charset
decode_data_element = dicom.charset.decode
# Callback for walk(), to decode the chr strings if necessary
# This simply calls the dicom.charset.decode function
def decode_callback(ds, data_element):
decode_data_element(data_element, dicom_character_set)
# Use the walk function to go through all elements and convert them
self.walk(decode_callback)
def __delattr__(self, name):
"""Intercept requests to delete an attribute by name, e.g. del ds.name
If name is a dicom descriptive string (cleaned with CleanName),
then delete the corresponding tag and data_element.
Else, delete an instance (python) attribute as any other class would do
"""
# First check if is a valid DICOM name and if we have that data element
tag = tag_for_name(name)
if tag and tag in self:
del self[tag]
# If not a DICOM name in this dataset, check for regular instance name
# can't do delete directly, that will call __delattr__ again
elif name in self.__dict__:
del self.__dict__[name]
# Not found, raise an error in same style as python does
else:
raise AttributeError(name)
def __dir__(self):
"""Give a list of attributes available in the dataset
List of attributes is used, for example, in auto-completion in editors
or command-line environments.
"""
# Force zip object into a list in case of python3. Also backwards
# compatible
meths = set(list(zip(
*inspect.getmembers(Dataset, inspect.isroutine)))[0])
props = set(list(zip(
*inspect.getmembers(Dataset, inspect.isdatadescriptor)))[0])
dicom_names = set(self.dir())
alldir = sorted(props | meths | dicom_names)
return alldir
def dir(self, *filters):
"""Return an alphabetical list of data_element keywords in the dataset.
Intended mainly for use in interactive Python sessions.
:param filters: zero or more string arguments to the function. Used for
case-insensitive match to any part of the DICOM name.
:returns: All data_element names in this dataset matching the filters.
If no filters, return all DICOM keywords in the dataset
"""
allnames = []
for tag, data_element in self.items():
allnames.extend(all_names_for_tag(tag))
# remove blanks - tags without valid names (e.g. private tags)
allnames = [x for x in allnames if x]
# Store found names in a dict, so duplicate names appear only once
matches = {}
for filter_ in filters:
filter_ = filter_.lower()
match = [x for x in allnames if x.lower().find(filter_) != -1]
matches.update(dict([(x, 1) for x in match]))
if filters:
names = sorted(matches.keys())
return names
else:
return sorted(allnames)
def get(self, key, default=None):
"""Extend dict.get() to handle DICOM keywords"""
if isinstance(key, (str, unicode)):
try:
return getattr(self, key)
except AttributeError:
return default
else:
# is not a string, try to make it into a tag and then hand it
# off to the underlying dict
if not isinstance(key, BaseTag):
try:
key = Tag(key)
except:
raise TypeError("Dataset.get key must be a string or tag")
try:
return_val = self.__getitem__(key)
except KeyError:
return_val = default
return return_val
def __getattr__(self, name):
"""Intercept requests for unknown Dataset python-attribute names.
If the name matches a Dicom keyword,
return the value for the data_element with the corresponding tag.
"""
# __getattr__ only called if instance cannot find name in self.__dict__
# So, if name is not a dicom string, then is an error
tag = tag_for_name(name)
if tag is None:
raise AttributeError("Dataset does not have attribute "
"'{0:s}'.".format(name))
tag = Tag(tag)
if tag not in self:
raise AttributeError("Dataset does not have attribute "
"'{0:s}'.".format(name))
else: # do have that dicom data_element
return self[tag].value
def __getitem__(self, key):
    """Operator for dataset[key] request.

    :param key: anything convertible to a Tag (int, tuple, keyword string)
    :returns: the DataElement for that tag, converting from raw or
        deferred-read form first if necessary
    :raises KeyError: if the tag is not present
    """
    tag = Tag(key)
    data_elem = dict.__getitem__(self, tag)
    if isinstance(data_elem, DataElement):
        return data_elem
    elif isinstance(data_elem, tuple):
        # tuple form: presumably a RawDataElement not yet parsed -- confirm
        # If a deferred read, then go get the value now
        if data_elem.value is None:
            from dicom.filereader import read_deferred_data_element
            data_elem = read_deferred_data_element(self.fileobj_type,
                self.filename, self.timestamp, data_elem)
        # Not converted from raw form read from file yet; do so now
        # (stored back so conversion happens only once per element)
        self[tag] = DataElement_from_raw(data_elem)
    return dict.__getitem__(self, tag)
def group_dataset(self, group):
    """Return a new Dataset containing only elements of the given group.

    :param group: the group part of a dicom (group, element) tag.
    :returns: a Dataset holding just the matching data elements
    """
    matching = Dataset()
    matching.update(dict((tag, elem)
                         for tag, elem in self.items() if tag.group == group))
    return matching
def __iter__(self):
    """Iterate the dataset, yielding data elements in increasing tag order.

    Unlike the underlying dict (which iterates keys), this yields the
    DataElement values; each element's .tag still carries the key.
    Sequence items are yielded as a single element -- callers must recurse
    into sequences themselves if desired (see iterall()).
    """
    for tag in sorted(self.keys()):
        yield self[tag]
def _pixel_data_numpy(self):
    """Return a NumPy array of the pixel data.

    NumPy is a numerical package for python. It is used if available.

    :raises TypeError: if no pixel data in this dataset, or if the
        PixelRepresentation/BitsAllocated combination does not map to a
        NumPy dtype
    :raises ImportError: if cannot import numpy
    :raises NotImplementedError: for multi-sample, single-frame data with
        BitsAllocated != 8
    """
    if 'PixelData' not in self:
        raise TypeError("No pixel data found in this dataset.")
    if not have_numpy:
        msg = "The Numpy package is required to use pixel_array, and numpy could not be imported.\n"
        raise ImportError(msg)
    # A byte swap is needed when the file's endianness differs from the machine's
    need_byteswap = (self.is_little_endian != sys_is_little_endian)
    # Make NumPy format code, e.g. "uint16", "int32" etc
    # from two pieces of info:
    #   self.PixelRepresentation -- 0 for unsigned, 1 for signed;
    #   self.BitsAllocated -- 8, 16, or 32
    format_str = '%sint%d' % (('u', '')[self.PixelRepresentation],
                              self.BitsAllocated)
    try:
        numpy_format = numpy.dtype(format_str)
    except TypeError:
        # BUGFIX: the original interpolated `numpy_format` here, but that
        # name is unbound when numpy.dtype() raises, so the handler itself
        # died with NameError.  Report the format string that was tried.
        raise TypeError("Data type not understood by NumPy: "
                        "format='%s', PixelRepresentation=%d, BitsAllocated=%d" % (
                            format_str, self.PixelRepresentation,
                            self.BitsAllocated))
    # Have correct Numpy format, so create the NumPy array
    arr = numpy.fromstring(self.PixelData, numpy_format)
    # XXX byte swap - may later handle this in read_file!!?
    if need_byteswap:
        arr.byteswap(True)  # True means swap in-place, don't make a new copy
    # The reshape operations below return a new *view* onto arr; no data copy
    if 'NumberOfFrames' in self and self.NumberOfFrames > 1:
        if self.SamplesPerPixel > 1:
            arr = arr.reshape(self.SamplesPerPixel, self.NumberOfFrames,
                              self.Rows, self.Columns)
        else:
            arr = arr.reshape(self.NumberOfFrames, self.Rows, self.Columns)
    else:
        if self.SamplesPerPixel > 1:
            if self.BitsAllocated == 8:
                arr = arr.reshape(self.SamplesPerPixel, self.Rows, self.Columns)
            else:
                raise NotImplementedError("This code only handles SamplesPerPixel > 1 if Bits Allocated = 8")
        else:
            arr = arr.reshape(self.Rows, self.Columns)
    return arr
# Used by the pixel_array property below
def _get_pixel_array(self):
    """Return the NumPy pixel array, rebuilding the cached copy when stale."""
    # Check if pixel data is in a form we know how to make into an array
    # XXX uses file_meta here, should really only be thus for FileDataset
    if self.file_meta.TransferSyntaxUID not in NotCompressedPixelTransferSyntaxes:
        raise NotImplementedError("Pixel Data is compressed in a format pydicom does not yet handle. Cannot return array")
    # Check if already have converted to a NumPy array
    # Also check if self.PixelData has changed. If so, get new NumPy array
    already_have = True
    if not hasattr(self, "_pixel_array"):
        already_have = False
    elif self._pixel_id != id(self.PixelData):
        # PixelData object was replaced since the cache was built
        already_have = False
    if not already_have:
        self._pixel_array = self._pixel_data_numpy()
        self._pixel_id = id(self.PixelData)  # is this guaranteed to work if memory is re-used??
    return self._pixel_array
@property
def pixel_array(self):
    """Return the pixel data as a NumPy array (cached between accesses)."""
    try:
        return self._get_pixel_array()
    except AttributeError:
        # Re-raise as PropertyError while preserving the original traceback
        # (Python 2 three-argument raise form), so an AttributeError inside
        # the property is not mistaken for "no such property".
        t, e, tb = sys.exc_info()
        raise PropertyError("AttributeError in pixel_array property: " + \
            e.args[0]), None, tb
# Format strings spec'd according to python string formatting options
# See http://docs.python.org/library/stdtypes.html#string-formatting-operations
# Defaults used by formatted_lines(); the %(...)s keys are drawn from the
# public attributes of each DataElement.
default_element_format = "%(tag)s %(name)-35.35s %(VR)s: %(repval)s"
default_sequence_element_format = "%(tag)s %(name)-35.35s %(VR)s: %(repval)s"
def formatted_lines(self, element_format=default_element_format,
                    sequence_element_format=default_sequence_element_format,
                    indent_format=None):
    """Generator yielding one formatted string per data element.

    Example:
        for line in dataset.formatted_lines("%(name)s=%(repval)s", "SQ:%(name)s=%(repval)s"):
            print(line)

    See the source code for default values which illustrate some of the
    names that can be used in the format strings.

    indent_format -- not used in current version. Placeholder for future functionality.
    """
    for data_element in self.iterall():
        # Collect every public attribute of the element into a mapping the
        # %-format strings can draw from; any callable attribute (e.g. a
        # descriptive-name method) is invoked so the map holds plain values.
        elem_dict = {}
        for attr_name in dir(data_element):
            if attr_name.startswith("_"):
                continue
            attr = getattr(data_element, attr_name)
            elem_dict[attr_name] = attr() if callable(attr) else attr
        if data_element.VR == "SQ":
            yield sequence_element_format % elem_dict
        else:
            yield element_format % elem_dict
def _pretty_str(self, indent=0, topLevelOnly=False):
    """Return this dataset's elements as one indented, newline-joined string.

    Backs __str__/__repr__ and top().  Recurses into sequence items with an
    increased indent level unless topLevelOnly is True.
    """
    pad = self.indent_chars * indent
    inner_pad = self.indent_chars * (indent + 1)
    lines = []
    for elem in self:
        if elem.VR != "SQ":
            lines.append(pad + repr(elem))
            continue
        # Sequence element: one summary line, then (optionally) each item
        summary = " %s %i item(s) ---- " % (elem.description(), len(elem.value))
        lines.append(pad + str(elem.tag) + summary)
        if not topLevelOnly:
            for item in elem.value:
                lines.append(item._pretty_str(indent + 1))
                lines.append(inner_pad + "---------")
    return "\n".join(lines)
def remove_private_tags(self):
"""Remove all Dicom private tags in this dataset and those contained within."""
def RemoveCallback(dataset, data_element):
"""Internal method to use as callback to walk() method."""
if data_element.tag.is_private:
# can't del self[tag] - won't be right dataset on recursion
del dataset[data_element.tag]
self.walk(RemoveCallback)
def save_as(self, filename, WriteLikeOriginal=True):
    """Write this dataset out as a DICOM file.

    :param filename: full path and filename to save the file to
    :param WriteLikeOriginal: passed through to dicom.filewriter.write_file;
        see that function for details.
    """
    dicom.write_file(filename, self, WriteLikeOriginal)
def __setattr__(self, name, value):
    """Intercept any attempts to set a value for an instance attribute.

    If name is a dicom descriptive string (cleaned with CleanName),
    then set the corresponding tag and data_element.
    Else, set an instance (python) attribute as any other class would do.
    """
    tag = tag_for_name(name)
    if tag is not None:  # successfully mapped name to a tag
        if tag not in self:
            # don't have this tag yet -> create the data_element instance
            VR = dictionaryVR(tag)
            data_element = DataElement(tag, VR, value)
        else:
            # already have this data_element, just changing its value
            data_element = self[tag]
            data_element.value = value
        # Now have data_element - store it in this dict
        # (goes through __setitem__, which validates and handles private tags)
        self[tag] = data_element
    else:  # name not in dicom dictionary - setting a non-dicom instance attribute
        # XXX note if user mis-spells a dicom data_element - no error!!!
        self.__dict__[name] = value
def __setitem__(self, key, value):
    """Operator for dataset[key] = value. Check consistency, and deal with private tags.

    :param key: anything convertible to a Tag; must equal value.tag
    :param value: a DataElement or RawDataElement instance
    :raises TypeError: if value is not a (Raw)DataElement
    :raises ValueError: if key does not match value.tag
    """
    if not isinstance(value, (DataElement, RawDataElement)):
        # ok if is subclass, e.g. DeferredDataElement
        raise TypeError("Dataset contents must be DataElement instances.\n" + \
            "To set a data_element value use data_element.value=val")
    tag = Tag(value.tag)
    if key != tag:
        raise ValueError("data_element.tag must match the dictionary key")
    data_element = value
    if tag.is_private:
        # See PS 3.5-2008 section 7.8.1 (p. 44) for how blocks are reserved
        logger.debug("Setting private tag %r" % tag)
        # The private creator element for block "bb" lives at (gggg,00bb)
        private_block = tag.elem >> 8
        private_creator_tag = Tag(tag.group, private_block)
        if private_creator_tag in self and tag != private_creator_tag:
            # Record the creator's name on the element (converting from raw
            # first if needed) so private dictionaries can be consulted later
            if isinstance(data_element, RawDataElement):
                data_element = DataElement_from_raw(data_element)
            data_element.private_creator = self[private_creator_tag].value
    dict.__setitem__(self, tag, data_element)
def __str__(self):
    """Handle str(dataset): the full indented element listing."""
    return self._pretty_str()
def top(self):
    """Return a listing of the top-level DICOM tags only (no recursion into sequences)."""
    return self._pretty_str(topLevelOnly=True)
def trait_names(self):
    """Return a list of valid names for auto-completion code.

    Used in IPython, so that data element names can be found
    and offered for autocompletion on the IPython command line.
    """
    # dir() reflects __dir__ on python >= 2.6; earlier would need self.__dir__()
    return dir(self)
def update(self, dictionary):
    """Extend dict.update() so keys may be DICOM keywords or tags."""
    for key, value in dictionary.items():
        is_keyword = isinstance(key, (str, unicode))
        if is_keyword:
            setattr(self, key, value)  # routes through __setattr__
        else:
            self[Tag(key)] = value  # routes through __setitem__
def iterall(self):
    """Yield every data element, recursing into sequence items.

    Unlike Dataset.__iter__, sequences are flattened: each SQ element is
    yielded first, then every element of each of its items, as if the
    file had no nesting.
    """
    for top_elem in self:
        yield top_elem
        if top_elem.VR != "SQ":
            continue
        for item in top_elem.value:
            for nested in item.iterall():
                yield nested
def walk(self, callback):
    """Call the given function for all dataset data_elements (recurses).

    Visit all data_elements, recurse into sequences and their datasets.
    The callback function is called for each data_element
    (including SQ element).
    Can be used to perform an operation on certain types of data_elements.
    E.g., `remove_private_tags`() finds all private tags and deletes them.

    :param callback: a callable taking two arguments: a dataset, and
        a data_element belonging to that dataset.

    `DataElement`s will come back in DICOM order (by increasing tag number
    within their dataset)
    """
    taglist = sorted(self.keys())
    for tag in taglist:
        data_element = self[tag]
        callback(self, data_element)  # self = this Dataset
        # 'tag in self' below needed in case callback deleted data_element
        if tag in self and data_element.VR == "SQ":
            sequence = data_element.value
            for dataset in sequence:
                dataset.walk(callback)
# repr(dataset) gives the same multi-line element listing as str(dataset)
__repr__ = __str__
class FileDataset(Dataset):
    """A Dataset plus the file-level attributes (preamble, file meta info,
    transfer-syntax flags, source filename/timestamp) needed to re-write it.
    """
    def __init__(self, filename_or_obj, dataset, preamble=None, file_meta=None,
                 is_implicit_VR=True, is_little_endian=True):
        """Initialize a dataset read from a DICOM file.

        :param filename_or_obj: full path and filename to the file, or a
            file-like object. Use None if is a BytesIO.
        :param dataset: some form of dictionary, usually a Dataset from read_dataset()
        :param preamble: the 128-byte DICOM preamble
        :param file_meta: the file meta info dataset, as returned by _read_file_meta,
            or an empty dataset if no file meta information is in the file
        :param is_implicit_VR: True if implicit VR transfer syntax used; False if explicit VR. Default is True.
        :param is_little_endian: True if little-endian transfer syntax used; False if big-endian. Default is True.
        """
        Dataset.__init__(self, dataset)
        self.preamble = preamble
        self.file_meta = file_meta
        self.is_implicit_VR = is_implicit_VR
        self.is_little_endian = is_little_endian
        # Remember filename and how to reopen it, for deferred element reads
        if isinstance(filename_or_obj, basestring):
            self.filename = filename_or_obj
            self.fileobj_type = open
        elif isinstance(filename_or_obj, io.BufferedReader):
            self.filename = filename_or_obj.name
            # This is the appropriate constructor for io.BufferedReader
            self.fileobj_type = open
        else:
            # use __class__ python <2.7?; http://docs.python.org/reference/datamodel.html
            self.fileobj_type = filename_or_obj.__class__
            if getattr(filename_or_obj, "name", False):
                self.filename = filename_or_obj.name
            elif getattr(filename_or_obj, "filename", False):  # gzip python <2.7?
                self.filename = filename_or_obj.filename
            else:
                # e.g. came from BytesIO or something file-like
                self.filename = None
        # Record the file's mtime so stale deferred reads can be detected
        self.timestamp = None
        if stat_available and self.filename and os.path.exists(self.filename):
            statinfo = stat(self.filename)
            self.timestamp = statinfo.st_mtime
| Python |
# config.py
"""Pydicom configuration options."""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# doc strings following items are picked up by sphinx for documentation
allow_DS_float = False
"""Set allow_float to True to allow DS instances to be created with floats;
otherwise, they must be explicitly converted to strings, with the user
explicity setting the precision of digits and rounding. Default: False"""
enforce_valid_values = True
"""Raise errors if any value is not allowed by DICOM standard, e.g. DS strings
that are longer than 16 characters; IS strings outside the allowed range.
"""
| Python |
#!/usr/bin/env python
# Setup script for the pydicom package (setuptools/distribute based).
from distribute_setup import use_setuptools
use_setuptools()  # bootstrap setuptools/distribute if not already installed
from setuptools import setup, find_packages
import os
import os.path
import sys

setup(name="pydicom",
      packages = find_packages(),
      include_package_data = True,
      version="1.0a",
      # ship the DICOM test files inside the installed package
      package_data = {'dicom': ['testfiles/*.dcm']},
      zip_safe = False,  # want users to be able to see included examples,tests
      description="Pure python package for DICOM medical file reading and writing",
      author="Darcy Mason",
      author_email="darcymason@gmail.com",
      url="http://pydicom.googlecode.com",
      license = "MIT license",
      keywords = "dicom python medical imaging",
      classifiers = [
          "License :: OSI Approved :: MIT License",
          "Intended Audience :: Developers",
          "Intended Audience :: Healthcare Industry",
          "Intended Audience :: Science/Research",
          "Development Status :: 4 - Beta",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Operating System :: OS Independent",
          "Topic :: Scientific/Engineering :: Medical Science Apps.",
          "Topic :: Scientific/Engineering :: Physics",
          "Topic :: Software Development :: Libraries",
      ],
      long_description = """
pydicom is a pure python package for parsing DICOM files.
DICOM is a standard (http://medical.nema.org) for communicating
medical images and related information such as reports
and radiotherapy objects.
pydicom makes it easy to read these complex files into natural
pythonic structures for easy manipulation.
Modified datasets can be written again to DICOM format files.
See the `Getting Started <http://code.google.com/p/pydicom/wiki/GettingStarted>`_
wiki page for installation and basic information, and the
`Pydicom User Guide <http://code.google.com/p/pydicom/wiki/PydicomUserGuide>`_ page
for an overview of how to use the pydicom library.
""",
      # NOTE(review): test_suite appears to be a placeholder; tests are
      # loaded through test_loader -- confirm before changing.
      test_loader = "dicom.test.run_tests:MyTestLoader",
      test_suite = "dummy_string"
      )
| Python |
# csv2dict.py
"""Reformat a dicom dictionary csv file (from e.g. standards docs) to Python syntax

Write the DICOM dictionary elements as:
    tag: (VR, VM, description, is_retired)
in python format

Also write the repeating groups or elements (e.g. group "50xx")
as masks that can be tested later for tag lookups that didn't work
"""
#
# Copyright 2008-2012, Darcy Mason
# This file is part of pydicom.
# See the license.txt file

# Input csv file and the generated module / dict names
csv_filename = "DICOM_dictionary_2008.csv"
pydict_filename = "_dicom_dict.py"
main_dict_name = "DicomDictionary"
mask_dict_name = "RepeatersDictionary"
def write_dict(f, dict_name, attributes, tagIsString):
    """Write `attributes` to file `f` as a python dict literal named `dict_name`.

    Each attribute is a 5-tuple (tag, VR, VM, description, is_retired);
    `tagIsString` controls whether the tag key is quoted (mask dict) or
    emitted bare (numeric literal, main dict).
    """
    value_part = """('%s', '%s', "%s", '%s')"""
    key_part = "'%s': " if tagIsString else "%s: "
    entry_format = key_part + value_part
    entries = [entry_format % attribute for attribute in attributes]
    f.write("\n%s = {\n" % dict_name)
    f.write(",\n".join(entries))
    f.write("}\n")
if __name__ == "__main__":
    import csv  # comma-separated value module
    # Python-2-only file() builtin; 'rb' mode as the py2 csv module requires
    csv_reader = csv.reader(file(csv_filename, 'rb'))
    main_attributes = []   # entries for the main DICOM dictionary
    mask_attributes = []   # entries with "x" wildcards (repeating groups)
    for row in csv_reader:
        tag, description, VR, VM, is_retired = row
        tag = tag.strip()  # at least one item has extra blank on end
        # "(gggg,eeee)" -> "gggg", "eeee"
        group, elem = tag[1:-1].split(",")
        # Handle one case "(0020,3100 to 31FF)" by converting to mask
        # Do in general way in case others like this come in future standards
        if " to " in elem:
            from_elem, to_elem = elem.split(" to ")
            if from_elem.endswith("00") and to_elem.endswith("FF"):
                elem = from_elem[:2] + "xx"
            else:
                raise NotImplementedError, "Cannot mask '%s'" % elem
        description = description.replace("\x92", "'")  # non-ascii apostrophe used
        description = description.replace("\x96", "-")  # non-ascii dash used
        # If blank (e.g. (0018,9445) and (0028,0020)), then add dummy vals
        if VR == '' and VM == '' and is_retired:
            VR = 'OB'
            VM = '1'
            description = "Retired-blank"
        # Handle retired "repeating group" tags e.g. group "50xx"
        if "x" in group or "x" in elem:
            tag = group + elem  # simple concatenation
            mask_attributes.append((tag, VR, VM, description, is_retired))
        else:
            tag = "0x%s%s" % (group, elem)
            main_attributes.append((tag, VR, VM, description, is_retired))
    py_file = file(pydict_filename, "wb")
    py_file.write("# %s\n" % pydict_filename)
    py_file.write('"""DICOM data dictionary auto-generated by %s"""\n' % __file__)
    write_dict(py_file, main_dict_name, main_attributes, tagIsString=False)
    write_dict(py_file, mask_dict_name, mask_attributes, tagIsString=True)
    py_file.close()
    print "Finished creating python file %s containing the dicom dictionary" % pydict_filename
    print "Wrote %d tags" % (len(main_attributes)+len(mask_attributes))
| Python |
# csv2dict2011.py
# -*- coding: utf-8 -*-
"""Reformat a dicom dictionary csv file (from e.g. standards docs) to Python syntax

Write the DICOM dictionary elements as:
    tag: (VR, VM, description, is_retired, keyword)
in python format
(entries are built below as (tag, VR, VM, description, is_retired, keyword),
so is_retired precedes keyword in the generated tuples)

Also write the repeating groups or elements (e.g. group "50xx")
as masks that can be tested later for tag lookups that didn't work
"""
#
# Copyright 2011-2012, Darcy Mason
# This file is part of pydicom, released under an MIT licence.
# See license.txt file for more details.

# Input csv file and the generated module / dict names
csv_filename = "dict_2011.csv"
pydict_filename = "_dicom_dict.py"
main_dict_name = "DicomDictionary"
mask_dict_name = "RepeatersDictionary"
def write_dict(f, dict_name, attributes, tagIsString):
    """Write `attributes` to file `f` as a python dict literal named `dict_name`.

    Each attribute is a 6-tuple (tag, VR, VM, description, is_retired,
    keyword); `tagIsString` controls whether the tag key is quoted (mask
    dict) or emitted bare (numeric literal, main dict).
    """
    value_part = """('%s', '%s', "%s", '%s', '%s')"""
    key_part = "'%s': " if tagIsString else "%s: "
    entry_format = key_part + value_part
    entries = [entry_format % attribute for attribute in attributes]
    f.write("\n%s = {\n" % dict_name)
    f.write(",\n".join(entries))
    f.write("}\n")
if __name__ == "__main__":
    import csv  # comma-separated value module
    # Python-2-only file() builtin; 'rb' mode as the py2 csv module requires
    csv_reader = csv.reader(file(csv_filename, 'rb'))
    main_attributes = []  # entries for the main DICOM dictionary
    mask_attributes = []  # entries with "x" wildcards (repeating groups)
    for row in csv_reader:
        tag, description, keyword, VR, VM, is_retired = row
        if tag == '' or tag == "Tag":
            continue  # skip blank rows and the header row
        tag = tag.strip()  # at least one item has extra blank on end
        VR = VR.strip()  # similarly, some VRs have extra blank
        keyword = keyword.strip()  # just in case
        # "(gggg,eeee)" -> "gggg", "eeee"
        group, elem = tag[1:-1].split(",")
        if is_retired.strip() == 'RET':
            is_retired = 'Retired'
        if VR == "see note":  # used with some delimiter tags
            VR = "NONE"  # to be same as '08 dict in pydicom
        # Handle one case "(0020,3100 to 31FF)" by converting to mask
        # Do in general way in case others like this come in future standards
        if " to " in elem:
            from_elem, to_elem = elem.split(" to ")
            if from_elem.endswith("00") and to_elem.endswith("FF"):
                elem = from_elem[:2] + "xx"
            else:
                raise NotImplementedError, "Cannot mask '%s'" % elem
        if description.endswith(" "):
            description = description.rstrip()
        description = description.replace("’", "'")  # non-ascii apostrophe
        description = description.replace("‑", "-")  # non-ascii dash used, shows in utf-8 as this a character
        description = description.replace("µ", "u")  # replace micro symbol
        # If blank (e.g. (0018,9445) and (0028,0020)), then add dummy vals
        if VR == '' and VM == '' and is_retired:
            VR = 'OB'
            VM = '1'
            description = "Retired-blank"
        # One odd tag in '11 standard (0028,3006)
        # NOTE(review): both sides of this comparison read identically here,
        # making it a no-op; the "extra space" comment suggests the compared
        # string was meant to differ -- verify against the source csv.
        if VR == 'US or OW':  # extra space
            VR = 'US or OW'
        # Handle retired "repeating group" tags e.g. group "50xx"
        if "x" in group or "x" in elem:
            tag = group + elem  # simple concatenation
            mask_attributes.append((tag, VR, VM, description, is_retired, keyword))
        else:
            tag = "0x%s%s" % (group, elem)
            main_attributes.append((tag, VR, VM, description, is_retired, keyword))
    py_file = file(pydict_filename, "w")
    py_file.write("# %s\n" % pydict_filename)
    py_file.write('"""DICOM data dictionary auto-generated by %s"""\n' % __file__)
    write_dict(py_file, main_dict_name, main_attributes, tagIsString=False)
    write_dict(py_file, mask_dict_name, mask_attributes, tagIsString=True)
    py_file.close()
    print "Finished creating python file %s containing the dicom dictionary" % pydict_filename
    print "Wrote %d tags" % (len(main_attributes)+len(mask_attributes))
| Python |
from __future__ import with_statement
# file "make_private_dict.py"
# Copyright (c) 2009 Daniel Nanz
# This file is released under the pydicom (http://code.google.com/p/pydicom/)
# license.
# See the file license.txt included with the pydicom distribution, also
# available at http://pydicom.googlecode.com
'''
-- Usage ------------------ (>= python 2.5, <3)---
python make_private_dict_alt.py
or
python make_private_dict_alt.py target_file_path
--------------------------------------------------
This script reads the DICOM private tag information as
maintained by the GDCM project (http://sourceforge.net/projects/gdcm/)
from their website and prints it either to sys.stdout
(if target_file_path == None) or to the file identified by a input
target_file_path.
The output is structured such, that for target_file_path = "_private_dict.py"
the output file can replace the current _private_dict.py file of the pydicom
source, which should allow straightforward testing.
'''
import urllib2
import io
import xml.etree.cElementTree as ET
import sys
import datetime
import os
import pprint
GDCM_URL = ''.join(('http://gdcm.svn.sf.net/viewvc/gdcm/trunk',
'/Source/DataDictionary/privatedicts.xml'))
UNKNOWN_NAME = 'Unknown'
PRIVATE_DICT_NAME = 'private_dictionaries'
def get_private_dict_from_GDCM(url=GDCM_URL, retired_field=''):
    '''Download and parse the GDCM private-tag XML from `url`.

    Open `url`, read content into a BytesIO file-like object and parse it
    into an ElementTree instance.

    :param url: location of the GDCM privatedicts.xml file
    :param retired_field: value stored in the is_retired slot of each entry
    :returns: dict mapping private-creator ("owner") name to an inner dict
        of tag-string -> (VR, VM, name, retired_field)
    '''
    # BUGFIX: honour the `url` parameter -- the original ignored it and
    # always fetched the module-level GDCM_URL.
    etree = ET.parse(io.BytesIO(urllib2.urlopen(url).read()))
    p_dict = etree.getroot()
    private_dict = dict()
    for entry in p_dict.findall('entry'):
        # Each XML attribute (group, element, owner, vr, vm, name) -> dict
        d = dict(entry.items())
        tag_string = ''.join((d['group'], d['element']))
        if d['name'] == '?':
            d['name'] = UNKNOWN_NAME
        # Group entries by their private creator ("owner")
        owner_dict = private_dict.setdefault(d['owner'], dict())
        owner_dict[tag_string] = (d['vr'], d['vm'], d['name'], retired_field)
    return private_dict
def get_introductory_text(filename, datestring):
    '''Build the header comment block written at the top of the generated
    private-dictionary module, ending with the opening of the dict assignment.
    '''
    header_lines = [
        '# ' + filename,
        '# This file is autogenerated by "make_private_dict.py",',
        '# from private elements list maintained by the GDCM project',
        '# (' + GDCM_URL + ').',
        '# Downloaded on ' + datestring + '.',
        '# See the pydicom license.txt file for license information on pydicom, and GDCM.',
        '',
        '# This is a dictionary of DICOM dictionaries.',
        '# The outer dictionary key is the Private Creator name ("owner"),',
        '# the inner dictionary is a map of DICOM tag to ',
        '# (VR, type, name, is_retired)',
        '',
        PRIVATE_DICT_NAME + ' = \\\n',
    ]
    return '\n'.join(header_lines)
def main():
    '''Get private dict from GDCM project. Write to sys.stdout or to output
    file given as pathname and as the first argument to the script.
    '''
    private_dict = get_private_dict_from_GDCM()
    try:
        file_path = sys.argv[1]
    except IndexError:
        file_path = None  # no argument given: print to stdout instead
    if file_path != None:
        with open(file_path, 'wb') as fd:
            filename = os.path.basename(file_path)
            datestring = datetime.date.isoformat(datetime.date.today())
            int_text = get_introductory_text(filename, datestring)
            fd.write(int_text)
            # pprint emits the dict as a (valid python) literal body
            pprint.pprint(private_dict, fd)
    else:
        pprint.pprint(private_dict)


if __name__ == '__main__':
    main()
| Python |
# make_UID_dict.py
"""Reformat a UID list csv file (Table A-1 PS3.6-2008) to Python syntax

Write the dict elements as:
    UID: (name, type, name_info, is_retired)
in python format

name_info is extra information extracted from very long names, e.g.
which bit size a particular transfer syntax is default for
is_retired is 'Retired' if true, else is ''
"""
#
# Copyright 2008-2012, Darcy Mason
# This file is part of pydicom.
# See the license.txt file for license information.

# Input csv file and the generated module / dict names
csv_filename = "UID_dictionary.csv"
pydict_filename = "_UID_dict.py"
dict_name = "UID_dictionary"
def write_dict(f, dict_name, attributes):
    """Write `attributes` to file `f` as a python dict literal named `dict_name`.

    Each attribute is a 5-tuple (UID, name, type, name_info, is_retired);
    the UID is always the (quoted) key.
    """
    entry_format = """'%s': ('%s', '%s', '%s', '%s')"""
    body = ",\n".join(entry_format % attribute for attribute in attributes)
    f.write("\n%s = {\n" % dict_name)
    f.write(body)
    f.write("}\n")
if __name__ == "__main__":
    import csv  # comma-separated value module
    # Python-2-only file() builtin; 'rb' mode as the py2 csv module requires
    csv_reader = csv.reader(file(csv_filename, 'rb'))
    attributes = []
    for row in csv_reader:
        UID, name, UIDtype, reference = row
        name_info = ""
        is_retired = ""
        name = name.replace("\x96", "-")  # non-ascii character
        if name.endswith("(Retired)"):
            name = name[:-10]  # drop " (Retired)" (10 chars incl leading space)
            is_retired = "Retired"
        if ":" in name:
            # e.g. "Default Transfer Syntax for ...: xxx" -> split off the info
            name, name_info = name.split(":")
        if "&" in name:
            name = name.replace("&", "and")
        name_info = name_info.strip()  # clear leading (and trailing, if any) whitespace
        attributes.append((UID, name, UIDtype, name_info, is_retired))  # leave Part reference out
    py_file = file(pydict_filename, "wb")
    py_file.write("# %s\n" % pydict_filename)
    # NOTE(review): the two writes below emit a module docstring followed by a
    # stray closing triple-quote at the end of the "Auto-generated" comment
    # line -- looks accidental but is harmless in the generated module; verify
    # before changing the templates.
    py_file.write('"""\n%s\n"""\n' % "Dictionary of UID: (name, type, name_info, is_retired)\n")
    py_file.write('# Auto-generated by %s"""\n' % __file__)
    write_dict(py_file, dict_name, attributes)
    py_file.close()
    print "Finished creating python file %s containing the UID dictionary" % pydict_filename
    print "Wrote %d elements" % len(attributes)
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Return `data` if its md5 matches the known digest for `egg_name`.

    Eggs not listed in md5_data are accepted unchecked; on a digest
    mismatch, print an error and exit(2).
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            # Python 2 "print to stderr" syntax
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Remember whether pkg_resources/setuptools were already imported; an
    # already-imported older version cannot be safely replaced in-process.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, and record where it came from
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:  # Python 2 except syntax
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Close both handles even if validation or the write failed
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall

    `argv` is forwarded to easy_install; exits the process on failure paths.
    """
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: bootstrap from a freshly downloaded egg,
        # then delete the egg again whatever happens.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg])   # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        # 0.0.1 is the broken placeholder release; refuse to continue.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed.  Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade it via easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry

    Hashes each file in `filenames`, merges the digests into the module-level
    `md5_data` dict, and rewrites the ``md5_data = {...}`` literal in this
    script's own source file in place.
    """
    import re
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Render the registry as sorted "key: value," source lines.
    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    # Locate the body of the md5_data dict literal in our own source.
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update FILE..." regenerates the embedded md5 registry;
    # any other invocation runs the setuptools installer.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
"""
Polygon partitioning and other utilities.
It's simple, it's in pure python and it just works, although the algorithm is
not the fastest (but for polygons with 5 or 10 vertices it doesn't matter
anyway).
Usage:
>>> import poly
>>> poly.triangulate([(0,0), (1,0), (0,1), (0.5, 0.25)])
[[(1, 0), (0, 1), (0.5, 0.25)], [(0, 0), (1, 0), (0.5, 0.25)]]
Note: This algorithm is a home-made hybrid; a better choice is the "ear
clipping" algorithm, but I didn't know about it when I wrote this.
"""
def isright(a, b, c):
    """Return True when point c lies strictly to the right of the
    directed line a-->b (False for collinear points)."""
    # Sign of the 2D cross product (b-a) x (c-a): negative means "right".
    cross = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    return cross < 0
def isline(t):
    """Return True when the three points in t are (nearly) collinear."""
    a, b, c = t[0], t[1], t[2]
    # Twice the signed area of triangle abc; near zero means degenerate.
    area2 = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    return abs(area2) < 1e-6
def convex(vertices):
    """Return True when the polygon given by `vertices` is convex.

    Every consecutive vertex triple (wrapping around to the first vertex)
    must turn in the same direction as the first triple.
    """
    closed = vertices + [vertices[0]]
    turn = isright(closed[0], closed[1], closed[2])
    return all(
        isright(closed[i], closed[i + 1], closed[i + 2]) == turn
        for i in range(1, len(closed) - 2)
    )
def isinpolygon(x, vertices):
    """Ray-casting point-in-polygon test.

    Returns True when point x is strictly inside the polygon given by
    `vertices`; a point that coincides with a vertex counts as outside.
    """
    if x in vertices:
        return False
    inside = False
    prev = vertices[-1]
    for cur in vertices:
        # Does the edge prev->cur cross the horizontal scan line at x[1]?
        if (cur[1] <= x[1] < prev[1]) or (prev[1] <= x[1] < cur[1]):
            # x-coordinate of the crossing point.
            xcross = (prev[0] - cur[0]) * (x[1] - cur[1]) / (prev[1] - cur[1]) + cur[0]
            if x[0] < xcross:
                inside = not inside
        prev = cur
    return inside
def overlapping(pol1, pol2):
    """Return True when either polygon has a vertex strictly inside the
    other one (vertex-in-polygon overlap test, not full intersection)."""
    if any(isinpolygon(v, pol2) for v in pol1):
        return True
    return any(isinpolygon(v, pol1) for v in pol2)
def triangulate(vertices):
    """Triangulate a (possibly non-convex) polygon.

    `vertices` is a list of (x, y) tuples in boundary order.  Returns a
    list of triangles, each a list of three vertices; degenerate
    (collinear) triangles are dropped.  Raises ValueError when the polygon
    cannot be split into two non-overlapping sub-polygons.
    """
    if len(vertices)==3:
        # Base case: a single triangle, kept unless degenerate.
        if isline(vertices):
            return []
        else:
            return [vertices]
    elif convex(vertices):
        # Convex polygon: fan triangulation from the first vertex.
        r=[]
        for i in range(2,len(vertices)):
            v=[vertices[0]]+vertices[i-1:i+1]
            if not isline(v):
                r.append(v)
        return r
    else:
        # Non-convex: find a chord (n1..n2) splitting the polygon into two
        # smaller, non-overlapping polygons, then recurse on both halves.
        n1=0
        n2=n1+2
        pol1=vertices[n1:n2+1]
        pol2=vertices[:n1+1]+vertices[n2:]
        while overlapping(pol1,pol2):
            if n2<len(vertices)-2:
                n2+=1
            elif n1<len(vertices)-3:
                n1+=1
                n2=n1+2
            else:
                # BUG FIX: raising a plain string is a TypeError on
                # Python >= 2.6; raise a real exception instead.
                raise ValueError("didn't find any partitioning")
            pol1=vertices[n1:n2+1]
            pol2=vertices[:n1+1]+vertices[n2:]
        return triangulate(pol1)+triangulate(pol2)
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
#import cgitb;cgitb.enable(format="text")
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
#import cgitb;cgitb.enable(format="text")
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
# Known problem: surface holes are probably not handled correctly; arcs
# are approximated by straight lines.
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
# Known problem: surface holes are probably not handled correctly; arcs
# are approximated by straight lines.
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen
# and export the result in several formats under /tmp.
import sys
sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen,
# export the result in several formats under /tmp and print mesh info.
import sys; sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
m.printinfo()
| Python |
#! /usr/bin/env python
# Driver script: flatten the gmsh geometry "t.geo", mesh it with tetgen,
# export the result in several formats under /tmp and print mesh info.
import sys; sys.path.append("../..")
import pexpect
import geom
# "gmsh -0" expands all parameters of the .geo file into /tmp/x.geo.
pexpect.run("gmsh -0 t.geo -o /tmp/x.geo")
g=geom.read_gmsh("/tmp/x.geo")
g.printinfo()
geom.write_tetgen(g,"/tmp/t.poly")
print "Generating mesh using tetgen..."
# NOTE(review): hard-coded path to the tetgen binary.
geom.runtetgen("/home/ondra/fzu/mesh/tetgen","/tmp/t.poly")
print "Reading mesh from tetgen..."
m=geom.read_tetgen("/tmp/t.1")
m.writemsh("/tmp/t12.msh")
print "Mesh written to /tmp/t12.msh"
m.writexda("/tmp/in.xda")
print "Mesh written to /tmp/in.xda"
m.writeregions("/tmp/t12.regions")
print "Regions written to /tmp/t12.regions"
m.writeBC("/tmp/t12.boundaries")
print "Boundaries written to /tmp/t12.boundaries"
m.printinfo()
| Python |
import math
from pyparsing import Word, Optional, alphas, nums, Combine, Literal, \
CaselessLiteral, LineEnd, Group, Dict, OneOrMore, StringEnd, restOfLine, \
ParseException, oneOf, Forward, alphanums, Keyword, SkipTo, ZeroOrMore
import geometry as geom
def read_gmsh(filename):
e = CaselessLiteral("E")
inum = Word("+-"+nums)
fnum = Combine(
Word( "+-"+nums, nums ) + Optional("."+Optional(Word(nums))) +
Optional(e+Word("+-"+nums,nums))
)
semi = Literal(";").suppress()
colon = Literal(",").suppress()
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
lbrace = Literal("{").suppress()
rbrace = Literal("}").suppress()
eq = Literal("=").suppress()
point = Group(
Keyword("Point")+lpar+inum+rpar+eq+
Group(lbrace+fnum+colon+fnum+colon+fnum+colon+fnum+rbrace)+semi
)
line = Group(
Keyword("Line")+lpar+inum+rpar+eq+
Group(lbrace+inum+colon+inum+rbrace)+semi
)
lineloop = Group(
Keyword("Line Loop")+lpar+inum+rpar+eq+
Group(lbrace+inum+OneOrMore(colon+inum)+rbrace)+semi
)
circle = Group(
Keyword("Circle")+lpar+inum+rpar+eq+
Group(lbrace+inum+colon+inum+colon+inum+rbrace)+semi
)
planesurface = Group(
Keyword("Plane Surface")+lpar+inum+rpar+eq+
Group(lbrace+inum+rbrace)+semi
)
ruledsurface = Group(
Keyword("Ruled Surface")+lpar+inum+rpar+eq+
Group(lbrace+inum+rbrace)+semi
)
surfaceloop = Group(
Keyword("Surface Loop")+lpar+inum+rpar+eq+
Group(lbrace+inum+OneOrMore(colon+inum)+rbrace)+semi
)
volume = Group(
Keyword("Volume")+lpar+inum+rpar+eq+
Group(lbrace+inum+rbrace)+semi
)
physicalsurface = Group(
Keyword("Physical Surface")+lpar+inum+rpar+eq+
Group(lbrace+inum+ZeroOrMore(colon+inum)+rbrace)+semi
)
physicalvolume = Group(
Keyword("Physical Volume")+lpar+inum+rpar+eq+
Group(lbrace+inum+ZeroOrMore(colon+inum)+rbrace)+semi
)
skip1 = Group(
Word(alphanums)+eq+fnum+semi
)
comment = Group( Literal("//")+restOfLine).suppress()
command = point | line | lineloop | circle | planesurface | ruledsurface | \
surfaceloop | volume | physicalsurface | physicalvolume | comment \
| skip1
grammar= OneOrMore(command)+StringEnd()
try:
tokens= grammar.parseFile(filename)
except ParseException, err:
print err.line
print " "*(err.column-1) + "^"
print err
raise err
lineloops={}
surfaceloops={}
g=geom.geometry()
for x in tokens:
if x[0]=="Point":
g.addpoint(int(x[1]),[float(x[2][0]),float(x[2][1]),float(x[2][2])])
elif x[0]=="Line":
assert len(x[2])==2
g.addline(int(x[1]),[int(x[2][0]),int(x[2][1])])
elif x[0]=="Circle":
assert len(x[2])==3
g.addline(int(x[1]),[int(x[2][0]),int(x[2][2])])
#g.add1(geom.circle(int(x[1]),int(x[2][0]),int(x[2][1]),
# int(x[2][2])))
elif x[0]=="Line Loop":
lineloops[int(x[1])]=[int(y) for y in x[2]]
elif x[0]=="Plane Surface":
assert len(x[2])==1
g.addsurface(int(x[1]),lineloops[int(x[2][0])])
elif x[0]=="Ruled Surface":
assert len(x[2])==1
g.addsurface(int(x[1]),lineloops[int(x[2][0])])
elif x[0]=="Surface Loop":
surfaceloops[int(x[1])]=[int(y) for y in x[2]]
elif x[0]=="Volume":
assert len(x[2])==1
g.addvolume(int(x[1]),surfaceloops[int(x[2][0])])
elif x[0]=="Physical Surface":
g.addphysicalsurface(int(x[1]),[int(y) for y in x[2]])
elif x[0]=="Physical Volume":
g.addphysicalvolume(int(x[1]),[int(y) for y in x[2]])
else:
raise "Unsupported entity: "+x[0]
return g
| Python |
# meshutils.py
"""
Finite element mesh utilities.
"""
__docformat__ = "restructuredtext en"
# Copyright (C) 2004-2005 O. Certik
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA.
#
# Please report all bugs and problems to <ondrej@certik.cz>.
################################################
import string
import math
import os
from pyparsing import Word, Optional, alphas, nums, Combine, Literal, CaselessLiteral, LineEnd, Group, Dict, OneOrMore, StringEnd, restOfLine, ParseException, oneOf, Forward, alphanums
import progressbar
#gmsh element types, see
#http://www.geuz.org/gmsh/doc/texinfo/gmsh_10.html#SEC65
#1D elements
mshpoint=15 #Point (1 node)
mshline=1 #Line (2 nodes)
mshline2=8 #Second order line (3 nodes)
#2D elements:
mshtriangle=2 #Triangle (3 nodes)
mshtriangle2=9 #Second order triangle (6 nodes)
mshquadrangle=3 #Quadrangle (4 nodes)
mshquadrangle2=10 #Second order quadrangle (9 nodes)
#3D elements
mshtetrahedron=4 #Tetrahedron (4 nodes)
mshtetrahedron2=11 #Second order tetrahedron (10 nodes)
mshprism=6 #Prism (6 nodes)
mshhexahedron=5 #Hexahedron (8 nodes)
#pmd element types, see
#http://www.it.cas.cz/manual/pmd/ra.htm
#2D elements:
pmdtriangle=4 #Triangle (3 or 6 nodes)
pmdtrianglerot=5 #Rotationally symmetric triangle (3 or 6 nodes)
pmdquadrangle=6 #Quadrangle (4 or 8 nodes)
pmdquadranglerot=7 #Rotationally symmetric quadrangle (4 or 8 nodes)
#3D elements
pmdtetrahedron=54 #Tetrahedron (4 or 10 nodes)
pmdprism=55 #Prism (6 or 15 nodes)
pmdhexahedron=56 #Hexahedron (8 or 20 nodes)
pmdsemiloof=61 #triangular semi-loof (6 nodes)
#libmesh element types, see
#http://libmesh.sourceforge.net/doxygen/namespacelibMeshEnums.php#54ee290fca7f0c26eb1e5986f52714574518904c8c2948ef3d2869c4cb4a2b8f
#2D elements:
libmeshtriangle=3 #TRI3
libmeshquadrangle=5 #QUAD4
#3D elements
libmeshtetrahedron=8 #TET4
libmeshtetrahedron_quad=9 #TET10
libmeshhexahedron=10 #HEX8
libmeshprism=13 #PRISM6
#number of the physical entity, which represents the whole model
mshmodelnum=100
def check(s, what):
    """Abort with a MeshUtilsCheckError unless ``s`` equals ``what``."""
    if s == what:
        return
    error("'%s' missing" % (what,), 1)
class MeshUtilsError(Exception):
    """Base class for all exceptions raised by meshutils."""
    pass
class MeshUtilsCheckError(MeshUtilsError):
    """A check() call failed - an expected marker line was missing."""
    pass
class MeshUtilsParseError(MeshUtilsError):
    """Parse error in an input file."""
    pass
class MeshUtilsWarning(MeshUtilsError):
    """Not necessarily an error, but something strange happened."""
    pass
def error(s, type=0):
    """Raise the meshutils exception selected by ``type``.

    type 1 -> MeshUtilsCheckError, 2 -> MeshUtilsParseError,
    3 -> MeshUtilsWarning, anything else -> MeshUtilsError; ``s`` is the
    message.  (``type`` shadows the builtin but is kept for the callers.)
    """
    # IDIOM FIX: "raise X, s" is Python-2-only syntax; the call form
    # behaves identically and is valid on Python 2 and 3.
    if type == 1:
        raise MeshUtilsCheckError(s)
    elif type == 2:
        raise MeshUtilsParseError(s)
    elif type == 3:
        raise MeshUtilsWarning(s)
    else:
        raise MeshUtilsError(s)
def myfloat(s):
    """Convert s to float, accepting PMD's exponent format (without 'E').

    E.g. "0.15+1" is read as "0.15E+1" == 1.5.
    """
    try:
        return float(s)
    except ValueError:
        # PMD writes exponents without the 'E'; reinsert it before the
        # trailing sign-and-digits pair and retry.
        return float(s[:-2] + "E" + s[-2:])
class bound:
    """Handles all physical entities <= mshmodelnum.

    This class is optionally filled in mesh.readmsh() by calling the method
    bound.handle2(). It extracts **only** the node numbers of the entity.
    All entities are stored in the self.f dictionary (key is the entity
    number).

    Has methods to get the node numbers in PMD notation
    (1 2 4 1 2 3 4 8 9 10 -> 1:4 8:10), i.e. it removes any repeating
    numbers and shortens the list using ":".

    Has a method to find elements from a given list which lie on the
    boundary formed by the nodes (also returns the appropriate side of the
    elements).  So - all boundary conditions should be done using this
    class.
    """
    def __init__(self):
        self.f={} #dictionary: entity number -> list of node numbers
        self.elementsassociated=False #set by associateelements()
    def readpmd(self,filename):
        """Loads the internal dictionary from 'filename'.
        Format must be the same as produced by writepmd()."""
        self.f={}
        f=file(filename,"r")
        line=f.readline()
        while line:
            x=string.split(line)
            n=int(x[1])
            if self.f.has_key(n):
                error("two /N have the same number!",3)
            # x[3:] holds the node list in "a b:c d" notation.
            self.f[n]=self.fromstr(x[3:len(x)])
            line=f.readline()
        f.close()
    def fromstr(self,str):
        """Expand a list of "a" / "a:b" tokens into a flat list of ints.
        (Parameter ``str`` shadows the builtin - kept for compatibility.)"""
        f=[]
        try:
            for x in str:
                seq=string.split(x,":")
                if len(seq) == 1:
                    f.append(int(x))
                elif len(seq) == 2:
                    # "a:b" is the inclusive range a..b.
                    f.extend(range(int(seq[0]),
                        int(seq[1])+1))
                else:
                    error("Invalid syntax",2)
        except ValueError:
            error("Invalid syntax",2)
        return f
    def writepmd(self,filename):
        """Write all entities to 'filename' as "/N <n> N <nodes>" lines."""
        f=file(filename,"w")
        for n,l in self.f.iteritems():
            f.write(" /N %d N"%(n))
            f.write(self.str(self.simplify(l)))
            f.write("\n")
        f.close()
    def getstr(self,key):
        """Return the node list of entity ``key`` in PMD ":" notation."""
        return self.str(self.getf(key))
    def getf(self,key):
        """Return the simplified (sorted, unique) list for entity ``key``."""
        if key > mshmodelnum and not self.elementsassociated:
            error("Elements from entity %d aren't associated."%(key))
        return self.simplify(self.f[key])
    def simplify(self,l):
        """Removes repeating numbers and sorts the internal list l.
        Note: it's very slow for large meshes (O(n^2) membership tests).
        This is the thing which slows down everything.
        """
        q=[]
        for x in l:
            if not (x in q): q.append(x)
        q.sort()
        return q
    def str(self,l):
        """Converts l (must be a sorted list) to a string (using ':').
        Consecutive runs are collapsed, e.g. 1 2 3 4 -> "1:4"."""
        s=""
        old2x=-1 #first number of the current consecutive run
        oldx=-1  #previous number seen
        for x in l:
            if x!=oldx+1:
                # Run broken: flush the previous run's tail first.
                if (oldx != -1) and (oldx!=old2x):
                    if oldx==old2x+1:
                        s+=" %d"%(oldx)
                    else:
                        s+=":%d"%(oldx)
                s+=" %d"%(x)
                old2x=x
            oldx=x
        # Flush the tail of the final run.
        if (oldx != -1) and (oldx!=old2x):
            if oldx==old2x+1:
                s+=" %d"%(oldx)
            else:
                s+=":%d"%(oldx)
        return s[1:]
    def handle(self,n,list):
        """Appends the numbers in 'list' to the internal dictionary (key=n).
        (Parameter ``list`` shadows the builtin - kept for compatibility.)"""
        if not self.f.has_key(n):
            self.f[n]=[]
        self.f[n].extend(list)
    def handle2(self,p):
        """Store the node numbers of one parsed msh element line ``p``
        under its physical entity number (p[2])."""
        entity=p[2]
        eltype=p[1]
        if eltype==mshpoint:
            self.handle(entity,(p[5],))
        elif eltype==mshline:
            self.handle(entity,(p[5],p[6]))
        elif eltype==mshline2:
            self.handle(entity,(p[5],p[6],p[7]))
        elif eltype==mshtriangle:
            self.handle(entity,(p[5],p[6],p[7]))
        elif eltype==mshquadrangle:
            self.handle(entity,(p[5],p[6],p[7],p[8]))
        else:
            error("unsupported element type. "\
                "entity: %d; eltype %d;\n%s"\
                %(entity,eltype,repr(p)),3)
    def handleelement(self,p):
        """Handles a whole element - not for boundary conditions."""
        self.elementsassociated=False
        entity=p[2]
        if not self.f.has_key(entity):
            self.f[entity]=[]
        self.f[entity].append([p[0],p[1]]+p[5:])
    def findelements(self,key,elements):
        """Returns element numbers (in a list), which lie at the boundary
        determined by the nodes of entity ``key``.
        Also returns the side of each element as (elnum, side) tuples.
        Currently supports triangles/quadrangles (2D) and
        tetrahedra/hexahedra/prisms (3D).
        """
        if not self.f.has_key(key):
            error("physical entity %d isn't in bound, aborting..."%(
                key))
        el=[]
        nodes=self.simplify(self.f[key])
        #print nodes
        for p in elements:
            # Collect the local (1-based) indices of this element's nodes
            # that lie on the boundary node set.
            nods=[]
            i=1
            for n in p[2:]:
                if nodes.count(n):
                    nods.append(i)
                i+=1
            if self.is2d:
                if len(nods)==3:
                    # Second-order element: drop midside nodes so only
                    # corner indices remain.
                    if p[1] == pmdtriangle or \
                        p[1] == pmdtrianglerot:
                        while nods[-1] > 3:
                            nods=nods[:-1]
                    # NOTE(review): the second comparison repeats
                    # pmdquadrangle; presumably pmdquadranglerot was
                    # intended - confirm before relying on rot quads here.
                    if p[1] == pmdquadrangle or \
                        p[1] == pmdquadrangle:
                        while nods[-1] > 4:
                            nods=nods[:-1]
                if len(nods)==2:
                    # Map the corner pair to the PMD side number.
                    if nods==[1,2]:
                        side=1
                    elif nods==[2,3]:
                        side=2
                    elif nods==[3,4]:
                        side=3
                    elif nods==[1,3]:
                        side=3 #must be a triangle. check it.
                        if p[1] != pmdtriangle and p[1] != pmdtrianglerot:
                            error("findelements: error 3",3)
                    elif nods==[1,4]:
                        side=4 #must be a quadrangle. check it.
                        if p[1] != pmdquadrangle and p[1] != pmdquadranglerot:
                            error("findelements: error 4",3)
                    else:
                        error("findelements: error 2",3)
                    el.append((p[0],side))
            else:
                if len(nods) == 3:
                    # Triangular face: only tetrahedra have those here.
                    if p[1] == pmdtetrahedron:
                        if nods==[1,2,3]:
                            side=1
                        elif nods==[1,2,4]:
                            side=2
                        elif nods==[2,3,4]:
                            side=3
                        elif nods==[1,3,4]:
                            side=4
                        else:
                            side=0
                            error("findelements:error 7",3)
                        el.append((p[0],side))
                    else:
                        error("findelements:error 6",3)
                if len(nods)==4:
                    # Quadrilateral face: hexahedron or prism.
                    if p[1] == pmdhexahedron:
                        if nods==[1,2,3,4]:
                            side=1
                        elif nods==[1,2,5,6]:
                            side=2
                        elif nods==[2,3,6,7]:
                            side=3
                        elif nods==[3,4,7,8]:
                            side=4
                        elif nods==[1,4,5,8]:
                            side=5
                        elif nods==[5,6,7,8]:
                            side=6
                        else:
                            side=0
                            error("findelements:error 7",3)
                        el.append((p[0],side))
                    elif p[1] == pmdprism:
                        if nods == [1,3,4,6]:
                            side=4
                        else:
                            side=0
                            error("findelements:error 9",3)
                        el.append((p[0],side))
                    else:
                        error("findelements:error 5",3)
        return el
    def writeSV(self,f,els,key):
        """Write (element, side) pairs as PMD "/S" lines to open file f."""
        for e in els:
            f.write(" /S %d E %d S%d\n"%(key,e[0],e[1]))
    def associateelements(self,elements):
        """Replace the stored element descriptions of every entity
        > mshmodelnum with the matching element numbers from ``elements``."""
        if self.elementsassociated:
            return
        keys=[k for k in self.f.keys() if k>mshmodelnum]
        for key in keys:
            self.associateelements_key_fast(key,elements)
        self.elementsassociated=True
    def associateelements_key_fast(self,key,elements):
        """Finds the elements under the key ``key`` in ``elements``.

        This is a fast version, which assumes an undocumented feature: that
        all the elements which gmsh exports are in *exactly* the same
        order both in the entity ``key`` and in ``mshmodelnum``.
        """
        list=self.f[key]
        p=list[0]
        # Find the first element; the rest are assumed consecutive.
        first=self.finde(p,elements)
        listout=range(first,first+len(list))
        #print listout
        #listout.sort()
        #print listout
        self.f[key]=listout
    def associateelements_key(self,key,elements):
        """Finds the elements under the key ``key`` in ``elements``.

        This is the regular (very slow, but bullet proof) version.
        """
        list=self.f[key]
        listout=[]
        for n,p in enumerate(list):
            listout.append(self.finde(p,elements))
        #listout.sort()
        #print listout
        self.f[key]=listout
    def finde(self,p,elements):
        """Return the number of the element in ``elements`` whose node list
        matches ``p`` (comparison from index 2 on); error if not found."""
        elnum=0
        for e in elements:
            if tuple(e[2:])==tuple(p[2:]):
                elnum=e[0]
                break
        if elnum==0:
            error("element not found.",2)
        return elnum
class mesh:
#This class should be refactored anyway, to include better IO
#interface, as I was thinking a year ago
#the bound() is probably unnecessary, and the IO routines
#(pmd,libmesh,gmsh,tetgen...) should be done in some clever way
#currently tetgen is in the module tetgen, others are here, then
#the geometry is in tetgen and gmsh modules...
#ideal solution is to have 2 classes: geometry and mesh. both would
#support import/export in some clean way.
    def __init__(self):
        """Create an empty mesh (see clean() for the attribute layout)."""
        self.clean()
    def clean(self):
        "Deletes the whole mesh and resets every state attribute."
        #The following variables are the only variables in mesh
        #(the state a mesh instance is in is fully determined by
        #these variables).
        #The user can do what he wants with them,
        #and wherever the method mesh.clean() is called,
        #the mesh instance is "fresh" afterwards.
        #Also it isn't allowed to store any other variable in the
        #mesh class than the following.
        self.nodes = []
        #(n,x,y,z) : (int,float,float,float)
        #n ... node number
        #    always ordered in a consecutive way, starting from 1;
        #    it must already be given like this in I1 and msh
        #x,y,z ... node coordinates
        self.elements = []
        #(n, eltype, nodes....) : (int,int,int,int,int...)
        #n .... element number
        #    always ordered in a consecutive way, starting from 1;
        #    it must already be given like this in I1 and msh
        #eltype .... PMD element type
        #nodes ... associated node numbers
        #    1st and 2nd order elements differ only by the number of
        #    nodes (i.e. 3 and 6 for a triangle)
        #
        #The whole mesh is in fact a PMD type of mesh - it is using
        #PMD primitives etc. So conversion to/from other formats
        #is handled by appropriate functions (writemsh,readmsh,
        #readELE,writeELE,...)
        self.boundbox = (0,0,0,0,0,0)
        #reading:
        #    I1: read from RP
        #    msh: computed from node coordinates and
        #        added +-eps (see readmsh)
        #    ELE,NOD: isn't set
        #writing:
        #    I1: written to RP
        #    msh,ELE,NOD: isn't used
        self.is2d=False #is the problem 2D (z==0) ?
        #reading:
        #    I1: set automatically distinguishing between (x,y) and (x,y,z)
        #    msh: set automatically dist. (x,y,"0") and (x,y,z)
        #    NOD: set automatically dist. (x,y,"0.0") and (x,y,z)
        #    ELE: isn't set
        #writing:
        #    I1: (x,y) versus (x,y,z)
        #    msh: (x,y,"0") versus (x,y,z)
        #    NOD,ELE: isn't used
        self.symmetric = False #rotational symmetric
        #reading:
        #    I1: read from kss
        #    msh and ELE: set from the parameter symmetric (readmsh);
        #        eltypes automatically converted to PMD according to
        #        self.symmetric
        #    NOD: isn't set
        #writing:
        #    I1: written to kss
        #    msh: eltypes converted to MSH format according to symmetric
        #    NOD,ELE: isn't used
        self.crit = 3.0
        #reading:
        #    I1: read from crit
        #    msh,NOD,ELE: isn't set
        #writing:
        #    I1: written to crit
        #    msh,NOD,ELE: isn't used
    def readmsh(self,filename,b=None,symmetric=False,associateelements=True):
        """Reads mesh from filename (*.msh, gmsh format version 1).

        It will read the physical entity "mshmodelnum" (default 100),
        which must contain every node (which will be used) and
        every element.
        Optional parameter b of type bound will be filled with
        all other physical (!=mshmodelnum) entities.

        Example:
            gmsh exports these physical entities: 100,1,2,101,200.
            Then readmsh will read 100 into self.elements and
            self.nodes, and optionally fills "b" with entities
            1,2,101 and 200. The assumption is that entity 100
            will contain every node and element used in entities
            1,2,101 and 200. Also the nodes and elements in 100
            must be consecutively sorted (use mshsort for this
            purpose).

        It will convert msh types to PMD types, so that
        self.elements only contains PMD types.

        symmetric.... is the problem rotationally symmetric?
            If yes, readmsh() will automatically convert triangles
            to rottriangles and quadrangles to rotquadrangles.
        """
        self.clean()
        f=file(filename,"r")
        l=f.readline()
        check(l,"$NOD\n")
        l=f.readline()
        nnod=int(l)
        l=f.readline()
        n=1
        M=1
        #bounding-box accumulators, initialised to +-M and widened below
        xl=+M;xu=-M;yl=M;yu=-M;zl=M;zu=-M;
        self.symmetric=symmetric
        while l:
            x=string.split(l)
            p=[float(a) for a in x]
            if p[0] != n:
                error("node-number mismatch (n=%d;p[0]=%d)"\
                    %(n,p[0]),2)
            if p[1]<xl:xl=p[1]
            if p[1]>xu:xu=p[1]
            if p[2]<yl:yl=p[2]
            if p[2]>yu:yu=p[2]
            if p[3]<zl:zl=p[3]
            if p[3]>zu:zu=p[3]
            # NOTE(review): this can only ever clear is2d, and clean()
            # already initialises it to False - looks like it should start
            # True here; confirm intended 2D detection.
            if x[3]!="0": self.is2d=False
            self.nodes.append((int(p[0]),p[1],p[2],p[3]))
            l=f.readline()
            if n==nnod: break
            n+=1
        if symmetric and not self.is2d:
            error("symmetric and it isn't 2D!",2)
        if b!=None:
            b.is2d=self.is2d
        check(l,"$ENDNOD\n")
        l=f.readline()
        check(l,"$ELM\n")
        l=f.readline()
        nelm=int(l)
        l=f.readline()
        n=1
        pmdelm=0 #PMD element counter (only mshmodelnum elements count)
        faces=[] #face node numbers collected from 2nd order quadrangles
        while l and nelm != 0:
            x=string.split(l)
            p=[int(a) for a in x]
            if p[0] != n: error("elm-number mismatch",2)
            if p[2] == mshmodelnum:
                # Element of the whole model: convert to a PMD element.
                if p[1] == mshtriangle:
                    if not self.is2d:
                        error("2D element in 3D mesh",2)
                    pmdelm+=1
                    if symmetric:
                        eltype=pmdtrianglerot
                    else:
                        eltype=pmdtriangle
                    self.elements.append((pmdelm,eltype,
                        p[5],p[6],p[7]))
                elif p[1] == mshtriangle2:
                    if not self.is2d:
                        error("2D element in 3D mesh",2)
                    pmdelm+=1
                    if symmetric:
                        eltype=pmdtrianglerot
                    else:
                        eltype=pmdtriangle
                    self.elements.append((pmdelm,eltype,
                        p[5],p[6],p[7],p[8],p[9],p[10]))
                elif p[1] == mshquadrangle:
                    if not self.is2d:
                        error("2D element in 3D mesh",2)
                    pmdelm+=1
                    if symmetric:
                        eltype=pmdquadranglerot
                    else:
                        eltype=pmdquadrangle
                    self.elements.append((pmdelm,eltype,
                        p[5],p[6],p[7],p[8]))
                elif p[1] == mshquadrangle2:
                    if not self.is2d:
                        error("2D element in 3D mesh",2)
                    pmdelm+=1
                    if symmetric:
                        eltype=pmdquadranglerot
                    else:
                        eltype=pmdquadrangle
                    self.elements.append((pmdelm,eltype,
                        p[5],p[6],p[7],p[8],
                        p[9],p[10],p[11],p[12]))
                    # The 9th (central) node is remembered for removal.
                    faces.append(p[13])
                elif p[1] == mshtetrahedron:
                    if self.is2d:
                        error("3D element in 2D mesh",2)
                    pmdelm+=1
                    eltype=pmdtetrahedron
                    self.elements.append((pmdelm,eltype,
                        p[5],p[6],p[7],p[8]))
                elif p[1] == mshtetrahedron2:
                    if self.is2d:
                        error("3D element in 2D mesh",2)
                    pmdelm+=1
                    eltype=pmdtetrahedron
                    self.elements.append((pmdelm,eltype,
                        p[5],p[6],p[7],p[8],
                        p[9],p[10],p[11],p[12],p[13],p[14]))
                elif p[1] == mshhexahedron:
                    if self.is2d:
                        error("3D element in 2D mesh",2)
                    pmdelm+=1
                    eltype=pmdhexahedron
                    el=[pmdelm,eltype]
                    el.extend(p[5:5+8])
                    self.elements.append(tuple(el))
                elif p[1] == mshprism:
                    if self.is2d:
                        error("3D element in 2D mesh",2)
                    pmdelm+=1
                    eltype=pmdprism
                    el=[pmdelm,eltype]
                    el.extend(p[5:5+6])
                    self.elements.append(tuple(el))
                else:
                    error("unsupported el %d"%(p[1]),3)
            elif p[2] < mshmodelnum:
                # Boundary-condition entity: hand the nodes to ``b``.
                if b!=None:
                    b.handle2(p)
            else:
                # Entity > mshmodelnum: whole elements, kept for later
                # association with the model elements.
                if b!=None:
                    b.handleelement(p)
            l=f.readline()
            #if n==nelm: break
            if l[0]=="$": break
            n+=1
        check(l,"$ENDELM\n")
        l=f.readline()
        if l != "": error("extra lines at the end of file",2)
        f.close()
        eps=0.001
        self.boundbox=(xl-eps, xu+eps, yl-eps, yu+eps, zl-eps, zu+eps)
        self.removecentralnodes(faces)
        if b!=None and associateelements:
            b.associateelements(self.elements)
def readmsh2(self,filename,b=None,symmetric=False,associateelements=True):
"""Reads mesh from filename (*.msh). Version 2.0
it will read the physical entity "mshmodelnum" (default 100),
which must contain every node (which will be used) and
every element.
Optional parameter b of type bound will be filled with
all other physical (!=mshmodelnum) nodes.
example:
gmsh exports these physical entities: 100,1,2,101,200
then readmsh will read 100 into self.elements and
self.nodes, and optionally fills "b" with entities
1,2,101 and 200. The assumption is, that entity 100
will contain every node and element used in entities
1,2,101 and 200. Also the nodes and elements in 100
must be consecutively sorted (use mshsort for this
purpose)
it will convert msh types to PMD types, so that
self.elements only contains PMD types
symmetric.... is the problem rot symmetric?
if yes, readmsh() will automatically convert triangles
to rottriangles and quadrangles to rotquadrangles.
"""
self.clean()
f=file(filename,"r")
l=f.readline()
l=f.readline()
l=f.readline()
l=f.readline()
check(l,"$Nodes\n")
l=f.readline()
nnod=int(l)
l=f.readline()
n=1
M=1
xl=+M;xu=-M;yl=M;yu=-M;zl=M;zu=-M;
self.symmetric=symmetric
while l:
x=string.split(l)
p=[float(a) for a in x]
if p[0] != n:
error("node-number mismatch (n=%d;p[0]=%d)"\
%(n,p[0]),2)
if p[1]<xl:xl=p[1]
if p[1]>xu:xu=p[1]
if p[2]<yl:yl=p[2]
if p[2]>yu:yu=p[2]
if p[3]<zl:zl=p[3]
if p[3]>zu:zu=p[3]
if x[3]!="0": self.is2d=False
self.nodes.append((int(p[0]),p[1],p[2],p[3]))
l=f.readline()
if n==nnod: break
n+=1
if symmetric and not self.is2d:
error("symmetric and it isn't 2D!",2)
if b!=None:
b.is2d=self.is2d
check(l,"$EndNodes\n")
l=f.readline()
check(l,"$Elements\n")
l=f.readline()
nelm=int(l)
l=f.readline()
n=1
pmdelm=0
faces=[]
while l and nelm != 0:
x=string.split(l)
p=[int(a) for a in x]
if p[0] != n: error("elm-number mismatch",2)
if p[2] == mshmodelnum:
if p[1] == mshtriangle:
if not self.is2d:
error("2D element in 3D mesh",2)
pmdelm+=1
if symmetric:
eltype=pmdtrianglerot
else:
eltype=pmdtriangle
self.elements.append((pmdelm,eltype,
p[5],p[6],p[7]))
elif p[1] == mshtriangle2:
if not self.is2d:
error("2D element in 3D mesh",2)
pmdelm+=1
if symmetric:
eltype=pmdtrianglerot
else:
eltype=pmdtriangle
self.elements.append((pmdelm,eltype,
p[5],p[6],p[7],p[8],p[9],p[10]))
elif p[1] == mshquadrangle:
if not self.is2d:
error("2D element in 3D mesh",2)
pmdelm+=1
if symmetric:
eltype=pmdquadranglerot
else:
eltype=pmdquadrangle
self.elements.append((pmdelm,eltype)+tuple(p[3:]))
elif p[1] == mshquadrangle2:
if not self.is2d:
error("2D element in 3D mesh",2)
pmdelm+=1
if symmetric:
eltype=pmdquadranglerot
else:
eltype=pmdquadrangle
self.elements.append((pmdelm,eltype,
p[5],p[6],p[7],p[8],
p[9],p[10],p[11],p[12]))
faces.append(p[13])
elif p[1] == mshtetrahedron:
if self.is2d:
error("3D element in 2D mesh",2)
pmdelm+=1
eltype=pmdtetrahedron
self.elements.append((pmdelm,eltype,
p[5],p[6],p[7],p[8]))
elif p[1] == mshhexahedron:
if self.is2d:
error("3D element in 2D mesh",2)
pmdelm+=1
eltype=pmdhexahedron
el=[pmdelm,eltype]
el.extend(p[5:5+8])
self.elements.append(tuple(el))
elif p[1] == mshprism:
if self.is2d:
error("3D element in 2D mesh",2)
pmdelm+=1
eltype=pmdprism
el=[pmdelm,eltype]
el.extend(p[5:5+6])
self.elements.append(tuple(el))
else:
error("unsupported el %d"%(p[1]),3)
elif p[2] < mshmodelnum:
if b!=None:
b.handle2(p)
else:
if b!=None:
b.handleelement(p)
l=f.readline()
if n==nelm: break
if l[0]=="$": break
n+=1
l=f.readline()
check(l,"$EndElements\n")
l=f.readline()
if l != "": error("extra lines at the end of file",2)
f.close()
eps=0.001
self.boundbox=(xl-eps, xu+eps, yl-eps, yu+eps, zl-eps, zu+eps)
self.removecentralnodes(faces)
if b!=None and associateelements:
b.associateelements(self.elements)
def writexda(self,filename,verbose=True,b=None):
"""Writes mesh to filename (*.xda).
We try to be byte to byte compatible with the xda output from libmesh
(so I use the same tabs and spaces as libmesh does).
"""
up=progressbar.MyBar("Writing mesh to %s:"%filename)
if verbose: up.init(len(self.nodes)+2*len(self.elements))
mapping=[]
blocks={}
sew=0
c=0
for e in self.elements:
p=[n-1 for n in e[2:]]
t=e[1]
if t==pmdtriangle:
elt=libmeshtriangle
elif t==pmdquadrangle:
elt=libmeshquadrangle
elif t==pmdtetrahedron:
if len(p)==4:
elt=libmeshtetrahedron
else:
assert len(p)==10
elt=libmeshtetrahedron_quad
elif t==pmdhexahedron:
elt=libmeshhexahedron
elif t==pmdprism:
elt=libmeshprism
else:
error("Unimplemented yet t=%d."%(t),2)
if elt in blocks:
blocks[elt].append(p)
else:
blocks[elt]=[p]
mapping.append((elt,len(blocks[elt])))
sew+=len(p)
c+=1
if verbose: up.update(c)
nels=[len(blocks[t]) for t in blocks.keys()]
map2=[]
for x in mapping:
k=[y for y in blocks.keys() if y<x[0]]
n=0
for t in k:
n+=len(blocks[t])
map2.append(n+x[1]-1)
mapping=map2
#print mapping
bs=[]
if b:
for key in b.f.keys():
if key >=mshmodelnum: continue
bo=[ [mapping[el-1],side-1,key] for (el,side) in
b.findelements(key,self.elements)]
bs.extend(bo)
f=file(filename,"w")
f.write("DEAL 003:003\n")
f.write("%d # Num. Elements\n"%len(self.elements))
f.write("%d # Num. Nodes\n"%len(self.nodes))
f.write("%d # Sum of Element Weights\n"%(sew))
f.write("%d # Num. Boundary Conds.\n"%(len(bs)))
f.write("%d # String Size (ignore)\n"%(65536))
f.write("%d # Num. Element Blocks.\n"%len(blocks))
f.write(("%d "*len(blocks.keys()))%tuple(blocks.keys())+
" # Element types in each block.\n")
f.write(("%d "*len(nels))%tuple(nels)+
" # Num. of elements in each block.\n")
f.write("Id String\n")
f.write("Title String\n")
for block in blocks.values():
for el in block:
f.write(("%d "*len(el))%tuple(el)+"\n")
c+=1
if verbose: up.update(c)
for node in self.nodes:
f.write("%e %e %e \n"%tuple(node[1:]))
c+=1
if verbose: up.update(c)
for line in bs:
f.write(("%d "*len(line))%tuple(line)+"\n")
def writemsh(self,filename,verbose=True):
"""Writes mesh to filename (*.msh).
"""
up=progressbar.MyBar("Writing mesh to %s:"%filename)
if verbose: up.init(len(self.nodes)+len(self.elements))
f=file(filename,"w")
l=f.write("$NOD\n")
l=f.write("%d\n"%len(self.nodes))
for node in self.nodes:
if self.is2d:
f.write("%d %f %f %d\n"%node)
else:
f.write("%d %f %f %f\n"%node)
if verbose: up.update(node[0])
l=f.write("$ENDNOD\n")
l=f.write("$ELM\n")
l=f.write("%d\n"%len(self.elements))
for el in self.elements:
if el[1]==pmdtriangle or el[1]==pmdtrianglerot:
eltype=mshtriangle
number_of_nodes=3
#if we want 2nd order, fix it here
elif el[1]==pmdquadrangle or el[1]==pmdquadranglerot:
eltype=mshquadrangle
number_of_nodes=4
#if we want 2nd order, fix it here
elif el[1]==pmdsemiloof:
#eltype=mshtriangle
eltype=mshquadrangle
number_of_nodes=4
elif el[1]==pmdhexahedron:
eltype=mshhexahedron
number_of_nodes=8
#if we want 2nd order, fix it here
elif el[1]==pmdtetrahedron:
if len(el[2:])==4:
eltype=mshtetrahedron
number_of_nodes=4
elif len(el[2:])==10:
eltype=mshtetrahedron2
number_of_nodes=10
else:
assert False
elif el[1]==pmdprism:
eltype=mshprism
number_of_nodes=6
#if we want 2nd order, fix it here
else:
error("unsupported eltype type=%s"\
%(repr(el[1])),3)
n=[ #elm-number
el[0],
#elm-type
eltype,
#reg-phys
mshmodelnum,
#reg-elem
0,
#number-of-nodes
number_of_nodes]
#node-number-list
n.extend(el[2:2+number_of_nodes])
f.write("%d "*len(n)%tuple(n))
f.write("\n")
if verbose: up.update(len(self.nodes)+el[0])
l=f.write("$ENDELM\n")
f.close()
def writemsh2(self,filename):
"""Writes mesh to filename (*.msh). Version 2.0
"""
f=file(filename,"w")
l=f.write("$MeshFormat\n")
l=f.write("2.0 0 8\n")
l=f.write("$EndMeshFormat\n")
l=f.write("$Nodes\n")
l=f.write("%d\n"%len(self.nodes))
for node in self.nodes:
if self.is2d:
f.write("%d %f %f %d\n"%node)
else:
f.write("%d %f %f %f\n"%node)
l=f.write("$EndNodes\n")
l=f.write("$Elements\n")
l=f.write("%d\n"%len(self.elements))
for el in self.elements:
if el[1]==pmdtriangle or el[1]==pmdtrianglerot:
eltype=mshtriangle
number_of_nodes=3
#if we want 2nd order, fix it here
elif el[1]==pmdquadrangle or el[1]==pmdquadranglerot:
eltype=mshquadrangle
number_of_nodes=4
#if we want 2nd order, fix it here
elif el[1]==pmdsemiloof:
#eltype=mshtriangle
eltype=mshquadrangle
number_of_nodes=4
elif el[1]==pmdhexahedron:
eltype=mshhexahedron
number_of_nodes=8
#if we want 2nd order, fix it here
elif el[1]==pmdtetrahedron:
eltype=mshtetrahedron
number_of_nodes=4
#if we want 2nd order, fix it here
elif el[1]==pmdprism:
eltype=mshprism
number_of_nodes=6
#if we want 2nd order, fix it here
else:
error("unsupported eltype type=%s"\
%(repr(el[1])),3)
n=[ #elm-number
el[0],
#elm-type
eltype,
#reg-phys
mshmodelnum,
#reg-elem
0,
#number-of-nodes
number_of_nodes]
#node-number-list
n.extend(el[2:2+number_of_nodes])
f.write("%d "*len(n)%tuple(n))
f.write("\n")
l=f.write("$EndElements\n")
f.close()
def readNOD(self,filename,scale=1.0):
"""Read nodes from filename (*.NOD).
"""
f=file(filename)
l=f.readline()
self.nodes=[]
self.is2d=True
while l:
x=string.split(l)
node=(int(x[0]),
myfloat(x[1])*scale,
myfloat(x[2])*scale,
myfloat(x[3])*scale)
if node[3]!=0.0:
self.is2d=False
self.nodes.append(node)
l=f.readline()
def writeNOD(self,filename):
"""Write nodes to filename (*.NOD).
"""
f=file(filename,"w")
for nod in self.nodes:
f.write("%d %f %f %f\n"%tuple(nod))
def readELE(self,filename,symmetric=False):
"""Read elements from filename (*.ELE).
"""
#format:first line don't know yet
#(n,NDIM,nnod,n1,n2,...,n_nnod,T1,T2,...,T_nnod)...
#n....... element number
#NDIM ... probably dimension of the problem 2 or 3
#nnod ... number of nodes of the element
#n1...n_nnod ... nodes
#T1...T_nnod ... THICK (probably only for 2D problems)
f=file(filename)
l=f.readline()
data=[]
for l in f.readlines(): data.extend(string.split(l))
self.elements=[]
n=1
pos=0
while pos<len(data):
if n!=int(data[pos]):
error("element number mischmatch",2)
nnod=int(data[pos+2])
x=[int(i) for i in data[pos:pos+3+nnod]]
ndim=x[1]
if nnod==6:
if symmetric:
ite=pmdtrianglerot
else:
ite=pmdtriangle
elif nnod==8:
if symmetric:
ite=pmdquadranglerot
else:
ite=pmdquadrangle
else:
error("unsupported element",2)
el=[n,ite]+x[3:3+nnod]
self.elements.append(el)
n+=1
if ndim==2:
nnod*=2
pos+=3+nnod
        def renumber_elements(self):
                #Renumbers self.elements by round-tripping them through the
                #external PMD "xelm" utility: write them out, run xelm, read
                #the result back in.
                #NOTE(review): the path to the xelm binary is hard-coded to a
                #user's home directory -- this only works on that machine.
                self.writeELE("t1.ELE")
                if os.access("XELM.ELE",os.F_OK): os.remove("XELM.ELE")
                os.spawnv(os.P_WAIT,"/home/ondra/pmd/PMD/xelm",["xelm","t1.ELE"])
                print "readELE"
                self.readELE("XELM.ELE")
                print "removing"
                os.remove("XELM.ELE")
                os.remove("t1.ELE")
                #writeELE pads every element with three zero nodes, which
                #readELE reads back as real entries -- strip them again here
                el=[]
                for e in self.elements:
                        el.append(e[:-3])
                self.elements=el
def readELE2(self,filename):
"""Read elements from filename (*.ELE).
"""
#format:first line don't know yet
#(n,nnod-1,n1,n2,...,n_nnod)...
#n....... element number
#nnod ... number of nodes of the element
#n1...n_nnod ... nodes
f=file(filename)
data=[]
for l in f.readlines(): data.extend(string.split(l))
self.elements=[]
n=1
while data:
# if n!=int(data[0]):
# error("element number mischmatch",2)
nnod=int(data[1])+1
x=[int(i) for i in data[0:2+nnod]]
if nnod==6:
ite=pmdtriangle
elif nnod==8:
ite=pmdhexahedron
else:
error("unsupported element",2)
el=[n,ite]+x[2:2+nnod]
self.elements.append(el)
n+=1
data=data[2+nnod:]
def sortnodes(self):
def mycmp(a,b):
if a[0] > b[0]:
return 1
elif a[0] < b[0]:
return -1
else:
return 0
self.nodes.sort(mycmp)
def writeELE(self,filename):
"""Write nodes to filename (*.ELE).
"""
f=file(filename,"w")
f.write("%d %d %d %d\n"%(0,2,8,2))
if self.is2d:
ndim=2
else:
ndim=3
for el in self.elements:
nods=tuple(el[2:])+(0,0,0)
nums=(el[0],ndim,len(nods))+tuple(nods)
f.write((" %d"*len(nums)+"\n")%nums)
if self.is2d:
nums=[1.0]*len(nods)
f.write((" %.2f"*len(nums)+"\n")%tuple(nums))
        def readpmd(self,filename):
                """Read the mesh from filename (*.I1).
                Can read I1, which was produced by writepmd().
                Should be able to read some hand written I1s from PMD Example
                Manual. Sometimes you will have to tweak the parser to read
                your syntax.
                I suggest to only use the writepmd() I1 syntax. This
                is easily readable/writable.
                """
                self.clean()
                f=file(filename,"r")
                #C() skips (at most) one comment line starting with ";" and
                #returns the following line instead
                def C(x):
                        if x[0]==";":
                                x=f.readline()
                        return x
                #IP record: integer parameters -- element/node counts,
                #default element type (ited), kss
                x=string.split(C(f.readline()))
                check(x[0],"IP")
                p=[int(a) for a in x[1:]]
                nelements=p[0]; nnodes=p[1]; ited=p[2]; kss=p[9]
                #kss==2 marks a rotationally symmetric problem
                if kss==2:
                        self.symmetric=True
                else:
                        self.symmetric=False
                #RP record: real parameters -- criterion, scale factor,
                #default thickness and the bounding box
                x=string.split(C(f.readline()))
                check(x[0],"RP")
                p=[float(a) for a in x[1:]]
                self.crit=p[0]; scale=p[1]; thdef=p[2]; self.boundbox=p[3:9]
                x=string.split(C(f.readline()))
                check(x[0],"XY")
                if len(x)>1: #old format....
                        #the old format packs numeric lists using ranges
                        #(a:b), repetitions (n*x) and named macros (=K ... =);
                        #it is parsed with the pyparsing grammar below
                        dict={}
                        #parse action for a:b -> the integers a..b (as strings)
                        def ev1(_str,loc,toks):
                                out= range(int(toks[0]),int(toks[2])+1)
                                out=[str(a) for a in out]
                                # print toks, "->", out
                                return out
                        #parse action for n*x -> x repeated n times
                        def ev2(str,loc,toks):
                                out=int(toks[0])*toks.asList()[2:]
                                # print toks, "->", out
                                return out
                        #parse action for =K ... = -> remember the enclosed
                        #list under the one-letter name K
                        def ev3(str,loc,toks):
                                key=toks[1]
                                out=toks.asList()[2:len(toks)-2]
                                d=[]
                                for a in out:
                                        #NOTE(review): eval() of file contents;
                                        #only feed trusted I1 files to this
                                        num=eval("%s"%(a))
                                        d.append(num)
                                dict[key]=d
                                #print toks, "->", out
                                return out
                        #parse action for =[n]K -> expand the stored list K,
                        #optionally with every item incremented by n
                        def ev4(_str,loc,toks):
                                if toks[1][0] in nums:
                                        n=eval("%s"%(toks[1]))
                                        key=toks[2]
                                else:
                                        n=0
                                        key=toks[1]
                                assert(dict.has_key(key))
                                #increment dict[key] by n
                                out=[str(a+n) for a in dict[key]]
                                #print toks, "dict->", out
                                return out
                        point = Literal(".")
                        e = CaselessLiteral("E")
                        inum = Combine(Word("+-"+nums,nums)+
                                Optional(e+Word( "+-"+nums, nums ) ) )
                        fnum = Combine( Word( "+-"+nums, nums ) +
                                Optional(point+Optional(Word(nums))) +
                                Optional(e+Word("+-"+nums,nums ) ) )
                        semi = Literal(";")
                        lpar = Literal("(").suppress()
                        rpar = Literal(")").suppress()
                        callpar = Literal("=")+Optional(inum)+Word(alphas,max=1)
                        defpar = Literal("=")+Word(alphas,max=1)
                        comment = semi + Optional(restOfLine)
                        terms=Forward()
                        atom=fnum | (lpar+ terms+rpar)
                        seq=(atom+":"+atom).setParseAction(ev1)
                        rep=(inum+"*"+atom).setParseAction(ev2)
                        terms << OneOrMore(rep | seq | atom)
                        numlist=OneOrMore(terms|(defpar+terms+
                                defpar).setParseAction(ev3) |
                                callpar.setParseAction(ev4))
                        nodX = Group("X"+numlist)
                        #nodX = Group("X"+numlist+LineEnd())
                        nodY = Group("Y"+numlist)
                        nodZ = Group("Z"+numlist)
                        nodes=Group(Dict(Literal("XY")+Group("N"+numlist)+nodX+nodY+Optional(nodZ)))
                        element= Group(Literal("EL")+Optional("T"+inum)+"E"+numlist+"N"+
                                OneOrMore(numlist))
                        CN=Group("CN"+restOfLine)
                        elements=Group(OneOrMore(element)).setResultsName("ELs")
                        EN=Literal("EN")
                        end=Group(EN+EN)
                        grammar=Dict(nodes+elements+end+StringEnd())
                        grammar.ignore(comment)
                        grammar.ignore(CN)
                        #re-join the already consumed "XY ..." line with the
                        #rest of the file and parse everything in one go
                        data=[string.join(x)+"\n"]
                        data.extend(f.readlines())
                        data=string.join(data)
                        tokens=""
                        try:
                                tokens=grammar.parseString(data)
                        except ParseException, err:
                                error("\n"+err.line+"\n"+" "*(err.column-1)+\
                                        "^\n" + repr(err),2)
                        #a "Z" coordinate list makes the mesh 3D
                        if "Z" in tokens["XY"].keys():
                                Zn=map(float,tokens["XY"]["Z"])
                                self.is2d=False
                        else:
                                Zn=[0.0]*len(tokens["XY"]["X"])
                        self.nodes=zip(range(1,len(Zn)+1),
                                map(float,tokens["XY"]["X"]),
                                map(float,tokens["XY"]["Y"]),
                                Zn)
                        elm={}
                        els= tokens["ELs"]
                        for el in els:
                                i=1
                                #an optional "T <num>" overrides the default
                                #element type from the IP record
                                if el[i]=="T":
                                        type=int(el[i+1])
                                        i+=2
                                else:
                                        type=ited
                                #nnum node numbers are kept per element, nskip
                                #values are consumed (the extra entries are
                                #presumably midside/2nd order values -- not
                                #verified)
                                if type==pmdhexahedron:
                                        nnum=20
                                        nskip=20
                                elif type==pmdtriangle:
                                        nnum=3
                                        nskip=6
                                elif type==pmdquadrangle:
                                        nnum=4
                                        nskip=8
                                elif type==pmdsemiloof:
                                        nnum=8
                                        nskip=8
                                else:
                                        error("unsupported type. "\
                                                "type=%d"%(type),3)
                                el=el.asList()[i:]
                                n=1
                                while el[n]!="N": n+=1
                                n+=1
                                #one EL record may define several elements:
                                #el[1:n-1] holds their numbers, the node lists
                                #follow after the "N"
                                for e in el[1:n-1]:
                                        elm[int(e)]=(int(e),type)+tuple(
                                                map(int,el[n:n+nnum]))
                                        n+=nskip
                        #the element numbers must form the consecutive run
                        #1..nelements
                        n=1
                        for i,e in elm.iteritems():
                                self.elements.append(e)
                                assert(i==n)
                                n+=1
                else:
                        #new (writepmd) format: one "C n x y [z]" line per node
                        for n in range(1,nnodes+1):
                                x=string.split(C(f.readline()))
                                check(x[0],"C")
                                if int(x[1])!=n: error("node number mismatch",2)
                                if len(x)==4:
                                        node=(n,float(x[2]),float(x[3]),
                                                float(0))
                                else:
                                        node=(n,float(x[2]),float(x[3]),
                                                float(x[4]))
                                        self.is2d=False
                                self.nodes.append(node)
                        x=string.split(C(f.readline()))
                        #"EL T <type> E <num> N <nodes...>" records, possibly
                        #continued on the following line
                        for n in range(1,nelements+1):
                                # print x
                                check(x[0],"EL")
                                check(x[1],"T")
                                T=int(x[2])
                                check(x[3],"E")
                                if int(x[4])!=n: error("node number mismatch",2)
                                check(x[5],"N")
                                el=[n,T]
                                el.extend([int(a) for a in x[6:]])
                                x=string.split(C(f.readline()))
                                if x[0] != "EL" and x[0] != "EN":
                                        el.extend([int(a) for a in x])
                                        x=string.split(C(f.readline()))
                                self.elements.append(el)
                        check(x[0],"EN")
                        x=string.split(C(f.readline()))
                        check(x[0],"EN")
                f.close()
                #apply scale factor
                self.nodes=[(i[0],i[1]*scale,i[2]*scale,i[3]*scale) for i in
                        self.nodes]
                self.boundbox=[i*scale for i in self.boundbox]
def writepmd(self,filename):
"Writes the mesh to filename (*.I1)."
f=file(filename,"w")
ited=4;
if self.symmetric:
kss=2
else:
kss=-1
f.write("IP %d %d %d 0 0 0 0 0 0 %d\n"
%(len(self.elements),len(self.nodes),ited,kss));
scale=1.0;thdef=1;
str=[self.crit,scale,thdef]
str.extend(self.boundbox)
f.write("RP %.2f %.4f %.3f %.3f %.3f %.3f %.3f %.3f %.3f 0\n"
%tuple(str))
f.write("XY\n");
for p in self.nodes:
if self.is2d:
f.write(" C %d %.17f %.17f\n"%(p[0],p[1],p[2]))
else:
f.write(" C %d %.17f %.17f %.17f\n"%
(p[0],p[1],p[2],p[3]))
for p in self.elements:
f.write("EL T %d E %d N"%(p[1],p[0]))
for a in p[2:]: f.write(" %d"%(a))
f.write("\n")
f.write("EN\n");
f.write("EN\n");
f.close()
def readxt2sSTR(self,filename):
"Reads temperature data from filename (*.STR) to scalars."
f=file(filename,"r")
l=f.readline()
l=f.readline()
check(l,"$TEMPERATURE\n")
l=f.readline()
n=0
self.scalars=[]
while l:
n+=1
x=string.split(l)
p=[float(a) for a in x]
if p[0] != n: error("node-number mismatch",2)
self.scalars.append(p[1])
if p[2] != 0: error("2nd number is not zero",2)
if p[3] != 0: error("2nd number is not zero",2)
l=f.readline()
f.close()
        def readstr2STR(self,filename):
                """Reads PMD results from filename (*.STR).

                The $DISPLACEMENT section is stored as one 3-vector per node
                in self.vectors; for every element the numbers on its 4 *HMH
                lines are collected into self.elementdata. The *STRESS,
                *TAU, *SCALAR1 and *SCALAR2 sections are read but discarded.
                """
                f=file(filename,"r")
                l=f.readline()
                l=f.readline()
                check(l,"$DISPLACEMENT\n")
                l=f.readline()
                n=0
                self.vectors=[]
                #one "n ux uy uz" line per node
                while l:
                        n+=1
                        x=string.split(l)
                        p=[float(a) for a in x]
                        if p[0] != n: error("node-number mismatch",2)
                        self.vectors.append(p[1:4])
                        l=f.readline()
                        if n==len(self.nodes): break
                check(l,"$ELEMENT\n")
                l=f.readline()
                n=0
                self.elementdata=[]
                #per element: an *IE header, then *STRESS (24 lines,
                #skipped), *HMH (4 lines, kept), *TAU, *SCALAR1, *SCALAR2
                #(4 lines each, skipped)
                while l:
                        n+=1
                        x=string.split(l)
                        check(x[0],"*IE")
                        if int(x[1]) != n: error("node-number mismatch",2)
                        check(int(x[2]),1)
                        check(float(x[3]),0)
                        l=f.readline()
                        check(l,"*STRESS\n")
                        for i in range(24):
                                l=f.readline()
                        l=f.readline()
                        check(l,"*HMH\n")
                        nums=[]
                        for i in range(4):
                                l=f.readline()
                                x=string.split(l)
                                p=[float(a) for a in x]
                                nums.extend(p)
                        self.elementdata.append(nums)
                        l=f.readline()
                        check(l,"*TAU\n")
                        for i in range(4):
                                l=f.readline()
                        l=f.readline()
                        check(l,"*SCALAR1\n")
                        for i in range(4):
                                l=f.readline()
                        l=f.readline()
                        check(l,"*SCALAR2\n")
                        for i in range(4):
                                l=f.readline()
                        l=f.readline()
                f.close()
        def readstr3STR(self,filename):
                """Reads PMD results from filename (*.STR).

                Like readstr2STR(), but for files without *SCALAR1/*SCALAR2
                sections: $DISPLACEMENT goes to self.vectors (one 3-vector
                per node), the 4 *HMH lines of every element go to
                self.elementdata; *STRESS and *TAU are read but discarded.
                """
                f=file(filename,"r")
                l=f.readline()
                l=f.readline()
                check(l,"$DISPLACEMENT\n")
                l=f.readline()
                n=0
                self.vectors=[]
                #one "n ux uy uz" line per node
                while l:
                        n+=1
                        x=string.split(l)
                        p=[float(a) for a in x]
                        if p[0] != n: error("node-number mismatch",2)
                        self.vectors.append(p[1:4])
                        l=f.readline()
                        if n==len(self.nodes): break
                check(l,"$ELEMENT\n")
                l=f.readline()
                n=0
                self.elementdata=[]
                #per element: *IE header, *STRESS (24 lines, skipped),
                #*HMH (4 lines, kept), *TAU (4 lines, skipped)
                while l:
                        n+=1
                        x=string.split(l)
                        check(x[0],"*IE")
                        if int(x[1]) != n: error("node-number mismatch",2)
                        check(int(x[2]),1)
                        check(float(x[3]),0)
                        l=f.readline()
                        check(l,"*STRESS\n")
                        for i in range(24):
                                l=f.readline()
                        l=f.readline()
                        check(l,"*HMH\n")
                        nums=[]
                        for i in range(4):
                                l=f.readline()
                                x=string.split(l)
                                p=[float(a) for a in x]
                                nums.extend(p)
                        self.elementdata.append(nums)
                        l=f.readline()
                        check(l,"*TAU\n")
                        for i in range(4):
                                l=f.readline()
                        l=f.readline()
                f.close()
        def writevectorspos(self,filename,infotext="PMD_vectors"):
                """Writes self.vectors as a gmsh *.pos view: one vector is
                attached to every node (vector points only, no element
                data is written).
                Optional parameter infotext specifies the name of the view
                in the pos file.
                """
                if len(self.nodes) != len(self.vectors):
                        error("Different number of nodes and vectors!")
                f=file(filename,"w")
                f.write("$PostFormat\n")
                #1.3 file-type data-size
                f.write("%g %d %d\n"%(1.2,0,8))
                f.write("$EndPostFormat\n")
                f.write("$View\n")
                f.write("%s %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d %d\n"%(
                        #view-name nb-time-steps
                        infotext, 1,
                        #nb-scalar-points nb-vector-points nb-tensor-points
                        0,len(self.nodes),0,
                        #nb-scalar-lines nb-vector-lines nb-tensor-lines
                        0,0,0,
                        #nb-scalar-triangles nb-vector-triangles nb-tensor-triangles
                        0,0,0,
                        #0,self.getnumtriangles(),0,
                        #nb-scalar-quadrangles nb-vector-quadrangles nb-tensor-quadrangles
                        0,0,0,
                        #0,self.getnumquadrangles(),0,
                        #nb-scalar-tetrahedra nb-vector-tetrahedra nb-tensor-tetrahedra
                        0,0,0,
                        #nb-scalar-hexahedra nb-vector-hexahedra nb-tensor-hexahedra
                        0,0,0,
                        #nb-scalar-prisms nb-vector-prisms nb-tensor-prisms
                        0,0,0,
                        #nb-scalar-pyramids nb-vector-pyramids nb-tensor-pyramids
                        0,0,0,
                        #nb-text2d nb-text2d-chars nb-text3d nb-text3d-chars
                        0,0,0,0
                        ))
                #time-step-values
                f.write("%d\n"%(0))
                #< scalar-point-value > ...
                for node in self.nodes:
                        n=(self.getxyz(node[0]),)
                        T=(self.getvector(node[0]),)
                        f.write(formatpos2(n,T))
                f.write("$EndView\n")
                f.close()
def writevectorspos3(self,filename,vectorfield,infotext="PMD_vectors"):
self.vectors=vectorfield
self.writevectorspos(filename,infotext)
def writescalarspos3(self,filename,scalars,infotext="PMD_scalars"):
self.scalars=scalars
self.writescalarspos(filename,infotext)
        def writescalarspos(self,filename,infotext="PMD_scalars"):
                """Writes self.scalars to *.pos.
                Optional parameter infotext specifies the name of the view
                in the pos file.
                1) Associates a scalar with every node, so gmsh shows points.
                2) Associates a scalar with every node of all elements, so gmsh fills
                the whole element (triangle, quadrangle, tetrahedra, ... etc.)
                with an extrapolated color.
                You can set visibility in gmsh (only points, only triangles...).
                """
                if len(self.nodes) != len(self.scalars):
                        error("Different number of nodes and scalars!")
                up=progressbar.MyBar("Writing scalar field to %s:"%filename)
                up.init(len(self.nodes)+len(self.elements))
                f=file(filename,"w")
                f.write("$PostFormat\n")
                #1.3 file-type data-size
                f.write("%g %d %d\n"%(1.2,0,8))
                f.write("$EndPostFormat\n")
                f.write("$View\n")
                f.write("%s %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d %d\n"%(
                        #view-name nb-time-steps
                        infotext, 1,
                        #nb-scalar-points nb-vector-points nb-tensor-points
                        len(self.nodes),0,0,
                        #nb-scalar-lines nb-vector-lines nb-tensor-lines
                        0,0,0,
                        #nb-scalar-triangles nb-vector-triangles nb-tensor-triangles
                        self.getnumtriangles(),0,0,
                        #nb-scalar-quadrangles nb-vector-quadrangles nb-tensor-quadrangles
                        self.getnumquadrangles(),0,0,
                        #nb-scalar-tetrahedra nb-vector-tetrahedra nb-tensor-tetrahedra
                        self.getnumtetrahedra(),0,0,
                        #nb-scalar-hexahedra nb-vector-hexahedra nb-tensor-hexahedra
                        self.getnumhexahedra(),0,0,
                        #nb-scalar-prisms nb-vector-prisms nb-tensor-prisms
                        self.getnumprisms(),0,0,
                        #nb-scalar-pyramids nb-vector-pyramids nb-tensor-pyramids
                        0,0,0,
                        #nb-text2d nb-text2d-chars nb-text3d nb-text3d-chars
                        0,0,0,0
                        ))
                #time-step-values
                f.write("%d\n"%(0))
                c=0
                #NOTE(review): the progressbar was initialized for
                #nodes+elements, but below it is only advanced for nodes and
                #tetrahedra, so it will not reach 100% for other meshes
                #< scalar-point-value > ...
                for node in self.nodes:
                        n=(self.getxyz(node[0]),)
                        T=(self.getscalar(node[0]),)
                        f.write(formatpos(n,T))
                        c+=1
                        up.update(c)
                #< scalar-triangles-value > ...
                for el in self.elements:
                        if el[1] in [pmdtriangle,pmdtrianglerot]:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]))
                                T=(self.getscalar(el[2]),
                                        self.getscalar(el[3]),
                                        self.getscalar(el[4]))
                                f.write(formatpos(n,T))
                #< scalar-quadrangles-value > ...
                for el in self.elements:
                        if el[1] == pmdquadrangle:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]),
                                        self.getxyz(el[5]))
                                T=(self.getscalar(el[2]),
                                        self.getscalar(el[3]),
                                        self.getscalar(el[4]),
                                        self.getscalar(el[5]))
                                f.write(formatpos(n,T))
                #< scalar-tetrahedra-value > ...
                for el in self.elements:
                        if el[1] == pmdtetrahedron:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]),
                                        self.getxyz(el[5]))
                                T=(self.getscalar(el[2]),
                                        self.getscalar(el[3]),
                                        self.getscalar(el[4]),
                                        self.getscalar(el[5]))
                                f.write(formatpos(n,T))
                                c+=1
                                up.update(c)
                #< scalar-hexahedra-value > ...
                for el in self.elements:
                        if el[1] == pmdhexahedron:
                                #print el
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]),
                                        self.getxyz(el[5]),
                                        self.getxyz(el[6]),
                                        self.getxyz(el[7]),
                                        self.getxyz(el[8]),
                                        self.getxyz(el[9]))
                                T=(self.getscalar(el[2]),
                                        self.getscalar(el[3]),
                                        self.getscalar(el[4]),
                                        self.getscalar(el[5]),
                                        self.getscalar(el[6]),
                                        self.getscalar(el[7]),
                                        self.getscalar(el[8]),
                                        self.getscalar(el[9]))
                                f.write(formatpos(n,T))
                #< scalar-prisms-value > ...
                for el in self.elements:
                        if el[1] == pmdprism:
                                #print el
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]),
                                        self.getxyz(el[5]),
                                        self.getxyz(el[6]),
                                        self.getxyz(el[7]))
                                T=(self.getscalar(el[2]),
                                        self.getscalar(el[3]),
                                        self.getscalar(el[4]),
                                        self.getscalar(el[5]),
                                        self.getscalar(el[6]),
                                        self.getscalar(el[7]))
                                f.write(formatpos(n,T))
                f.write("$EndView\n")
                f.close()
        def writescalarspos2(self,filename,scaltime,infotext="PMD_scalars",dt=1.0):
                """Writes self.scalars to *.pos.
                Optional parameter infotext specifies the name of the view
                in the pos file.
                1) Associates a scalar with every node, so gmsh shows points.
                2) Associates a scalar with every node of all elements, so gmsh fills
                the whole element (triangle, quadrangle, tetrahedra, ... etc.)
                with an extrapolated color.
                You can set visibility in gmsh (only points, only triangles...).

                scaltime ... list of scalar fields, one per time step; only
                        triangle elements are written, the time step values
                        are 0, dt, 2*dt, ...
                """
                f=file(filename,"w")
                f.write("$PostFormat\n")
                #1.3 file-type data-size
                f.write("%g %d %d\n"%(1.2,0,8))
                f.write("$EndPostFormat\n")
                f.write("$View\n")
                f.write("%s %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d %d\n"%(
                        #view-name nb-time-steps
                        infotext, len(scaltime),
                        #nb-scalar-points nb-vector-points nb-tensor-points
                        #len(self.nodes),0,0,
                        0,0,0,
                        #nb-scalar-lines nb-vector-lines nb-tensor-lines
                        0,0,0,
                        #nb-scalar-triangles nb-vector-triangles nb-tensor-triangles
                        self.getnumtriangles(),0,0,
                        #nb-scalar-quadrangles nb-vector-quadrangles nb-tensor-quadrangles
                        #self.getnumquadrangles(),0,0,
                        0,0,0,
                        #nb-scalar-tetrahedra nb-vector-tetrahedra nb-tensor-tetrahedra
                        #self.getnumtetrahedra(),0,0,
                        0,0,0,
                        #nb-scalar-hexahedra nb-vector-hexahedra nb-tensor-hexahedra
                        #self.getnumhexahedra(),0,0,
                        0,0,0,
                        #nb-scalar-prisms nb-vector-prisms nb-tensor-prisms
                        #self.getnumprisms(),0,0,
                        0,0,0,
                        #nb-scalar-pyramids nb-vector-pyramids nb-tensor-pyramids
                        0,0,0,
                        #nb-text2d nb-text2d-chars nb-text3d nb-text3d-chars
                        0,0,0,0
                        ))
                for timestep in range(len(scaltime)):
                        #time-step-values
                        f.write("%f "%(timestep*dt))
                f.write("\n")
                #< scalar-triangles-value > ...
                #for every triangle all time steps are written in one record
                for el in self.elements:
                        if el[1] in [pmdtriangle,pmdtrianglerot]:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]))
                                T=[]
                                for timestep in range(len(scaltime)):
                                        self.scalars=scaltime[timestep]
                                        if len(self.nodes) != len(self.scalars):
                                                error("Different number of nodes and scalars!")
                                        T.extend((self.getscalar(el[2]),
                                                self.getscalar(el[3]),
                                                self.getscalar(el[4])))
                                f.write(formatpos(n,T))
                f.write("$EndView\n")
                f.close()
def writescalars(self,filename,scalars,C=0.0):
f=file(filename,"w")
for n,s in zip(self.nodes,scalars):
f.write("%4d %f\n"%(n[0],s+C))
        def writestresspos(self,filename,infotext="PMD_stress"):
                """Writes per-element values from self.elementdata as a gmsh
                *.pos view: the first 3 values of every element's data are
                put on triangle vertices, the first 4 on quadrangle and
                tetrahedron vertices. Other element types are not written.
                Optional parameter infotext specifies the name of the view
                in the pos file.
                """
                if len(self.elements) != len(self.elementdata):
                        error("Different number of elements and data!")
                f=file(filename,"w")
                f.write("$PostFormat\n")
                #1.3 file-type data-size
                f.write("%g %d %d\n"%(1.2,0,8))
                f.write("$EndPostFormat\n")
                f.write("$View\n")
                f.write("%s %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d\n"
                        "%d %d %d %d\n"%(
                        #view-name nb-time-steps
                        infotext, 1,
                        #nb-scalar-points nb-vector-points nb-tensor-points
                        0,0,0,
                        #nb-scalar-lines nb-vector-lines nb-tensor-lines
                        0,0,0,
                        #nb-scalar-triangles nb-vector-triangles nb-tensor-triangles
                        self.getnumtriangles(),0,0,
                        #nb-scalar-quadrangles nb-vector-quadrangles nb-tensor-quadrangles
                        self.getnumquadrangles(),0,0,
                        #nb-scalar-tetrahedra nb-vector-tetrahedra nb-tensor-tetrahedra
                        self.getnumtetrahedra(),0,0,
                        #nb-scalar-hexahedra nb-vector-hexahedra nb-tensor-hexahedra
                        0,0,0,
                        #nb-scalar-prisms nb-vector-prisms nb-tensor-prisms
                        0,0,0,
                        #nb-scalar-pyramids nb-vector-pyramids nb-tensor-pyramids
                        0,0,0,
                        #nb-text2d nb-text2d-chars nb-text3d nb-text3d-chars
                        0,0,0,0
                        ))
                #time-step-values
                f.write("%d\n"%(0))
                #< scalar-triangles-value > ...
                for el,data in zip(self.elements,self.elementdata):
                        if el[1] == pmdtriangle:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]))
                                T=data[:3]
                                f.write(formatpos(n,T))
                #< scalar-quadrangles-value > ...
                for el,data in zip(self.elements,self.elementdata):
                        if el[1] == pmdquadrangle:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]),
                                        self.getxyz(el[5]))
                                T=data[:4]
                                f.write(formatpos(n,T))
                #< scalar-tetrahedra-value > ...
                for el,data in zip(self.elements,self.elementdata):
                        if el[1] == pmdtetrahedron:
                                n=(self.getxyz(el[2]),
                                        self.getxyz(el[3]),
                                        self.getxyz(el[4]),
                                        self.getxyz(el[5]))
                                T=data[:4]
                                f.write(formatpos(n,T))
                f.write("$EndView\n")
                f.close()
def average(self,list):
a=0
for i in list:
a+=i
a/=len(list)
return a
def average_vectors(self,list):
a=[0,0,0]
for i in list:
a[0]+=i[0]
a[1]+=i[1]
a[1]+=i[1]
a[0]/=len(list)
a[1]/=len(list)
a[2]/=len(list)
return a
def convert_el_to_nodes(self,els):
tmp=[]
for i in self.nodes:
tmp.append([])
for el,data in zip(self.elements,els):
nodes=el[2:]
if self.is2d:
assert(data[len(nodes)*2]==0)
for node in nodes:
tmp[node-1].append(data)
self.scalars=[]
for s in tmp:
assert s!=[]
self.scalars.append(self.average(s))
def convert_stress_to_nodes(self):
tmp=[]
for i in self.nodes:
tmp.append([])
for el,data in zip(self.elements,self.elementdata):
nodes=el[2:]
if self.is2d:
assert(data[len(nodes)*2]==0)
for node,scalar in zip(nodes,data):
tmp[node-1].append(scalar)
self.scalars=[]
for s in tmp:
self.scalars.append(self.average(s))
def getxyz(self,n):
"Returns a tuple (x,y,z) of a node whose number is n."
n-=1
if n<0 or n>=len(self.nodes):
error("node not found")
node=self.nodes[n]
if node[0] != n+1: error("node numbers mischmatch",3)
return [node[1],node[2],node[3]]
def getscalar(self,n):
"Returns a scalar of a node whose number is n."
n-=1
if n<0 or n>=len(self.scalars):
error("node not found")
return self.scalars[n]
def getvector(self,n):
"Returns a vector associated to a node whose number is n."
n-=1
if n<0 or n>=len(self.vectors):
error("node not found")
return self.vectors[n]
def getnumtriangles(self):
n=0
for x in self.elements:
if x[1] in [pmdtriangle,pmdtrianglerot]: n+=1
return n
def getnumquadrangles(self):
n=0
for x in self.elements:
if x[1] == pmdquadrangle: n+=1
return n
def getnumtetrahedra(self):
n=0
for x in self.elements:
if x[1] == pmdtetrahedron: n+=1
return n
def getnumhexahedra(self):
n=0
for x in self.elements:
if x[1] == pmdhexahedron: n+=1
return n
def getnumprisms(self):
n=0
for x in self.elements:
if x[1] == pmdprism: n+=1
return n
def removecentralnodes(self,nods):
if nods==[]:
return
nods.sort()
n=nods[0]
for nod in nods:
if n!=nod:
#n=nod
error("faces aren't consecutive",2)
n+=1
if nods[-1] != self.nodes[-1][0]:
error("faces aren't at the end of self.nodes",2)
self.nodes=self.nodes[:len(self.nodes)-len(nods)]
def getscalar2(self,n,scalars):
"Returns a scalar of a node whose number is n."
n-=1
if n<0 or n>=len(scalars):
error("node not found")
return scalars[n]
def scalars_elements2nodes(self,scalarsel):
assert(len(self.elements)==len(scalarsel))
tmp=[]
for i in self.nodes:
tmp.append([])
for el,data in zip(self.elements,scalarsel):
nodes=el[2:]
#if self.is2d:
# assert(data[len(nodes)*2]==0)
for node,scalar in zip(nodes,data):
tmp[node-1].append(scalar)
scalars=[]
for s in tmp:
scalars.append(self.average(s))
return scalars
def vectors_elements2nodes(self,scalarsel):
    """Convert per-element vector data to per-node data.

    Like scalars_elements2nodes(), but values are vectors averaged via
    self.average_vectors(); a node touched by no element is an error.
    """
    assert(len(self.elements)==len(scalarsel))
    per_node = [[] for _ in self.nodes]
    for el, data in zip(self.elements, scalarsel):
        for node, value in zip(el[2:], data):
            per_node[node-1].append(value)
    result = []
    for vals in per_node:
        if vals == []: error("There are some extra nodes!")
        result.append(self.average_vectors(vals))
    return result
def dist(self,a,b):
    """Euclidean distance between the nodes numbered a and b."""
    pa = self.getxyz(a)
    pb = self.getxyz(b)
    return math.sqrt(sum((u - v)**2 for u, v in zip(pa, pb)))
def dist2(self,p,q):
    """Euclidean distance between two points given as 3-sequences."""
    dx, dy, dz = p[0] - q[0], p[1] - q[1], p[2] - q[2]
    return math.sqrt(dx**2 + dy**2 + dz**2)
def det(self,x,y):
    """Cyclic determinant used for plane fitting:
    sum of x[i]*y[i+1] minus sum of x[i]*y[i-1] (indices mod 3)."""
    positive = x[0]*y[1] + x[1]*y[2] + x[2]*y[0]
    negative = x[0]*y[2] + x[1]*y[0] + x[2]*y[1]
    return positive - negative
def computegrad(self,scalars):
    """Returns the gradient of ``scalars`` (both are given in nodes).

    One constant gradient vector is computed per element (repeated once
    per element node) and then averaged back onto the nodes through
    vectors_elements2nodes().
    """
    grad=[]
    for e in self.elements:
        if e[1]==pmdtriangle or e[1]==pmdtrianglerot:
            # Triangle with nodes e[2], e[3], e[4]: fit the plane
            # z = scalar(x, y) through the three points and read the
            # in-plane slope off its coefficients.
            a1=self.getxyz(e[2])
            a1[2]=self.getscalar2(e[2],scalars)   # reuse the z slot for the scalar
            a2=self.getxyz(e[3])
            a2[2]=self.getscalar2(e[3],scalars)
            a3=self.getxyz(e[4])
            a3[2]=self.getscalar2(e[4],scalars)
            # Plane a*x + b*y + c*z + d = 0 via cyclic determinants.
            a=self.det((a1[1],a2[1],a3[1]),(a1[2],a2[2],a3[2]))
            b=-self.det((a1[0],a2[0],a3[0]),(a1[2],a2[2],a3[2]))
            c=self.det((a1[0],a2[0],a3[0]),(a1[1],a2[1],a3[1]))
            # grad = (-a/c, -b/c); the element is planar, so z-slope 0.
            v=(-a/c,-b/c,0)
            grad.append((v,v,v))          # same gradient at all 3 nodes
        elif e[1]==pmdtetrahedron:
            # quick hack: estimate the gradient from the single edge
            # e[2]-e[3] only.
            # NOTE(review): d divides by the edge length once, so |v|
            # equals the scalar difference, not difference/length -- a
            # true directional derivative would divide twice.  Confirm
            # this is the intended "quick hack" behaviour.
            a1=self.getxyz(e[2])
            a2=self.getxyz(e[3])
            d=(self.getscalar2(e[2],scalars)-self.getscalar2(e[3],scalars))/self.dist2(a1,a2)
            v=((a1[0]-a2[0])*d,
               (a1[1]-a2[1])*d,
               (a1[2]-a2[2])*d)
            #if e[0]==2879: print v
            grad.append((v,v,v,v))        # one copy per tetrahedron node
        else:
            error("Element not implemented yet.")
    return self.vectors_elements2nodes(grad)
def computenorm(self,vectors):
    """Returns sqrt(x^2+y^2+z^2) for all vectors (x,y,z) in ``vectors``."""
    # dist2 against the origin is exactly the Euclidean norm.
    return [self.dist2(v, (0, 0, 0)) for v in vectors]
def printinfo(self):
    """Print a short mesh summary (node and element counts) to stdout."""
    # Parenthesized single-argument print is valid in both Python 2
    # (print statement) and Python 3 (print function), unlike the bare
    # Python-2-only statements used previously.
    print("nodes: %d" % len(self.nodes))
    print("elements: %d" % len(self.elements))
def readGMV(self,filename,what=2):
    """Reads GMV file.
    what ... 0 read only mesh
         ... 1 read only data
         ... 2 read both
    When data is read (what in [1,2]) the scalar field is returned as
    a list with one value per node.
    """
    # NOTE(review): uses the Python 2 builtin file() and never closes
    # the handle; relies on garbage collection.
    if what in [0,2]:
        self.clean()
    f=file(filename)
    l=f.readline(); check(l,"gmvinput ascii\n")
    l=f.readline(); check(l,"\n")
    l=f.readline();
    x=l.split()
    check(x[0],"nodes")
    nnod=int(x[1])
    # Coordinates arrive as three whitespace-separated rows: all x
    # values, then all y values, then all z values.
    l=f.readline();
    if what in [0,2]:
        xs=[float(x) for x in l.split()]
        check(len(xs),nnod)
    l=f.readline();
    if what in [0,2]:
        ys=[float(x) for x in l.split()]
        check(len(ys),nnod)
    l=f.readline();
    if what in [0,2]:
        zs=[float(x) for x in l.split()]
        check(len(zs),nnod)
        # nodes entries are (number, x, y, z) with 1-based numbering.
        nodes=zip(range(1,nnod+1),xs,ys,zs)
    l=f.readline();
    check(l,"\n")
    l=f.readline();
    x=l.split()
    check(x[0],"cells")
    if what in [0,2]:
        nelm=int(x[1])
        elements=[]
        for i in range(1,nelm+1):
            # Each cell is a "type arity" line followed by a line of
            # node numbers; stored as (number, pmd-type, node, ...).
            l=f.readline();
            x=l.split()
            if x[0]=="quad":
                check(x[1],"4")
                l=f.readline();
                p=[int(x) for x in l.split()]
                elements.append((i,pmdquadrangle)+tuple(p))
            elif x[0]=="phex8":
                check(x[1],"8")
                l=f.readline();
                p=[int(x) for x in l.split()]
                elements.append((i,pmdhexahedron)+tuple(p))
            elif x[0]=="tri":
                check(x[1],"3")
                l=f.readline();
                p=[int(x) for x in l.split()]
                elements.append((i,pmdtriangle)+tuple(p))
            elif x[0]=="tet":
                check(x[1],"4")
                l=f.readline();
                p=[int(x) for x in l.split()]
                elements.append((i,pmdtetrahedron)+tuple(p))
            else:
                error("unimplemented yet.",2)
        self.nodes=nodes
        self.elements=elements
    if what in [1,2]:
        # Skip forward to the "variable" section and read one scalar
        # field (section name line, then the data line).
        while l!="variable\n":
            l=f.readline();
        l=f.readline();
        l=f.readline();
        scalars=[float(x) for x in l.split()]
        check(len(scalars),nnod)
        return scalars
def writeregions(self,filename):
    """Write the textual representation of self.regions to filename."""
    # The original left the file handle open; 'with' guarantees the
    # data is flushed and the descriptor released even on error.
    with open(filename, "w") as f:
        f.write(str(self.regions))
def writeBC(self,filename,verbose=True):
    """self.faces contain triplets (p1,p2,p3) which are triangles of
    tetrahedrons on the boundary. We need to find the number of each
    corresponding tetrahedron and it's side."""
    def findelements(faces,elements):
        """Returns element numbers (in a list), which lies
        at the boundary determined by faces.
        Also returns the side of element. Currently only support
        triangles and quadrangles.
        """
        # Superseded O(len(elements)*len(faces)) brute force; kept for
        # reference only -- the call below is commented out in favour
        # of findelements2().
        t=len(elements)
        c=0
        el=[]
        for p in elements:
            c+=1
            if c%500==0: print 100.0*c/t    # crude percentage progress
            for ii,f in enumerate(faces):
                # Local (1-based) indices of p's nodes that lie on f.
                nods=[]
                for i,n in enumerate(p[2:]):
                    if n in f: nods.append(i+1)
                if len(nods)!=len(f): continue
                if len(f)==3:
                    if p[1] == pmdtetrahedron:
                        # Map the matched local node set to a side number.
                        if nods==[1,2,3]:
                            side=1
                        elif nods==[1,2,4]:
                            side=2
                        elif nods==[2,3,4]:
                            side=3
                        elif nods==[1,3,4]:
                            side=4
                        else:
                            # NOTE(review): string exceptions -- invalid
                            # in Python 3, deprecated in Python 2.
                            raise"findelements: tetrahedron face mischmatch"
                        el.append((p[0],side))
                        # NOTE(review): deleting from 'faces' while it is
                        # enumerated is only safe because of the break
                        # immediately below -- confirm before reuse.
                        del faces[ii]
                        break;
                    else:
                        raise "findelements: unsupported element in mesh"
                else:
                    raise "findelements: unsupported face %s"%(repr(f))
        return el
    def buildmapping(elements):
        # Build node number -> list of element numbers containing it.
        m={}
        for e in elements:
            for n in e[2:]:
                if not m.has_key(n): m[n]=[]
                m[n].append(e[0])
        return m
    def findelements2(faces,elements,nemap):
        # Fast variant of findelements(): the node->elements map nemap
        # narrows each face to the single element owning it.
        bc=[]
        for f in faces:
            assert len(f)==3
            candidates=set(nemap[f[0]])
            candidates.intersection_update(nemap[f[1]])
            candidates.intersection_update(nemap[f[2]])
            assert len(candidates)==1 #the face "f" belongs to just 1 el.
            elnum=candidates.pop()
            el=elements[elnum-1]
            assert el[0]==elnum #the mapping "nemap" is correct
            nods=[]
            for i,n in enumerate(el[2:]):
                if n in f: nods.append(i+1)
            assert len(nods)==len(f) #the mapping "nemap" is correct
            if el[1] == pmdtetrahedron:
                # Same local-nodes -> side mapping as findelements().
                if nods==[1,2,3]:
                    side=1
                elif nods==[1,2,4]:
                    side=2
                elif nods==[2,3,4]:
                    side=3
                elif nods==[1,3,4]:
                    side=4
                else:
                    raise"findelements: tetrahedron face mischmatch"
                bc.append((elnum,side))
            else:
                raise "findelements: unsupported element in mesh"
        return bc
    # Progress covers both phases: resolving faces and writing them out.
    up=progressbar.MyBar("Writing BC to %s:"%filename)
    if verbose: up.init(2*len(self.faces))
    nemap=buildmapping(self.elements)
    bc={}
    c=0
    for key in self.faces:
        #bc[key]=findelements(self.faces[key],self.elements)
        bc[key]=findelements2(self.faces[key],self.elements,nemap)
        c+=1
        if verbose: up.update(c)
    # Output format: one header line with the boundary count, then one
    # line per boundary: "key count  elnum side  elnum side ...".
    f=open(filename,"w")
    #f.write(repr(bc))
    f.write("%d\n"%len(bc))
    for k in bc:
        f.write("%d %d %s\n"%(k,len(bc[k]),numlist2str(flat(bc[k]))))
        c+=1
        if verbose: up.update(c)
    # print bc[2]
    # for i in range(len(bc[2])):
    #     print bc[2][i], self.elements[bc[2][i][0]-1]
def flat(a):
    """Recursively flatten nested lists/tuples into one flat list."""
    out = []
    for item in a:
        out.extend(flat(item) if isinstance(item, (list, tuple)) else [item])
    return out
def numlist2str(x):
    """Render a sequence of integers as space-separated decimal text."""
    return " ".join(["%d" % v for v in x])
def formatpos(n,T):
    """Format one *.pos record for scalar data.

    n is a sequence of points, T a sequence of scalar values.  The
    coordinates are emitted grouped per axis (x1..xk, y1..yk, z1..zk)
    followed by the values of T, each as '%.18f ' (note the trailing
    space), terminated by a newline.
    """
    values = []
    for axis in range(len(n[0])):
        for point in n:
            values.append(point[axis])
    values.extend(T)
    return "%.18f " * len(values) % tuple(values) + "\n"
def formatpos2(n,T):
    """Format one *.pos record for vector data.

    Like formatpos(), but T is a sequence of vectors whose components
    are flattened onto the end of the record.
    """
    values = []
    for axis in range(len(n[0])):
        for point in n:
            values.append(point[axis])
    for vector in T:
        values.extend(vector)
    return "%.18f " * len(values) % tuple(values) + "\n"
| Python |
import math
import geometry as geom
def conv(x):
    """Render sequence x as a bracketed comma list: '[a,b,...]'.

    Note: for an empty sequence the historical behaviour (']') is kept.
    """
    joined = "".join(str(item) + "," for item in x)
    return ("[" + joined)[:-1] + "]"
def conv2(x):
    """Join strings with commas (no trailing comma)."""
    return "".join(item + "," for item in x)[:-1]
def conv3(x):
    """Join strings with commas, each wrapped in single quotes."""
    return "".join("'" + item + "'," for item in x)[:-1]
def conv4(a,b,c):
    """Render three sequences as a MATLAB matrix literal:
    '[a1 a2; b1 b2; c1 c2]'."""
    out = "["
    # Each row is space-joined; the last character (normally the
    # trailing space) is replaced by the row separator / closing bracket,
    # exactly as the original did.
    for row, sep in ((a, "; "), (b, "; "), (c, "]")):
        for value in row:
            out += "%s " % value
        out = out[:-1] + sep
    return out
def axissym(a,b,c):
    "a,b ... line. c ... center"
    # Offset from the center c to the midpoint of segment a-b.
    offset = [(p + q)/2. - r for p, q, r in zip(a, b, c)]
    length = math.sqrt(offset[0]**2 + offset[1]**2 + offset[2]**2)
    # Step from c to the midpoint plus an extra 10% of a unit step.
    return [r + o + 0.1*o/length for r, o in zip(c, offset)]
def femlabsurface3_old(f,n,p):
    """Emit a FEMLAB face3 command for triangle p (3 points in 3D).

    Kept for reference only: FEMLAB's face3 misbehaves for some point
    sets, so femlabsurface3() is used instead.
    """
    assert len(p)==3
    xs = [point[0] for point in p]
    ys = [point[1] for point in p]
    zs = [point[2] for point in p]
    return (f+"=face3(%s',%s',%s')\n")%(n,conv(xs),conv(ys),conv(zs))
def curve2(f,p1,p2):
    """Emit a FEMLAB curve2 command for the 2D segment p1-p2 named f."""
    xs = conv([p1[0], p2[0]])
    ys = conv([p1[1], p2[1]])
    return "%s=curve2(%s,%s);\n" % (f, xs, ys)
def getp3(p1,p2,p3):
    """Local 2D coordinates of p3 in the frame where p1 is the origin
    and p2 lies on the positive x axis (computed from side lengths)."""
    import math
    a = norm2(vec(p2, p3))
    b = norm2(vec(p1, p3))
    c = norm2(vec(p1, p2))
    # Law of cosines for the angle at p1; clamp rounding noise so the
    # sqrt below stays real.
    cos_angle = (b**2 + c**2 - a**2) / (2*b*c)
    cos_angle = min(cos_angle, 1.0)
    return [b*cos_angle, b*math.sqrt(1 - cos_angle**2)]
def femlabsurface3(f,n,p):
    """Emit FEMLAB commands building triangle p (3 points in 3D).

    The triangle is drawn in a local 2D work plane (three curve2
    segments coerced to a solid) and then embedded back into 3D with
    the work plane spanned by the original points.  Returns the
    command string; the final object is named (f % n).
    """
    assert len(p)==3
    variable=f%n
    # Names of the three temporary curves and the 2D solid.
    f1=variable+"f1"
    f2=variable+"f2"
    f3=variable+"f3"
    g4=variable+"g4"
    s=""
    # Local 2D frame: p[0] at the origin, p[1] on the x axis, p[2]
    # positioned from side lengths (see getp3()).
    p1=[0,0]
    p2=[norm2(vec(p[1],p[0])),0]
    p3=getp3(p[0],p[1],p[2])
    s+=curve2(f1,p1,p2)
    s+=curve2(f2,p2,p3)
    s+=curve2(f3,p3,p1)
    s+="%s=geomcoerce('solid',{%s,%s,%s});\n"%(g4,f1,f2,f3)
    # Per-axis coordinate lists defining the work plane in 3D.
    a=[y[0] for y in p]
    b=[y[1] for y in p]
    c=[y[2] for y in p]
    # print p,norm2(crossproduct(vec(p[0],p[1]), vec(p[0],p[2])))
    s+="%s=embed(%s,'Wrkpln',%s);\n"%(variable,g4,conv4(a,b,c))
    return s
def femlabline(f,n,p):
    """Emit a FEMLAB curve3 command for the 3D segment through the two
    point objects in p (each must provide getxyz())."""
    assert len(p)==2
    xs = [point.getxyz()[0] for point in p]
    ys = [point.getxyz()[1] for point in p]
    zs = [point.getxyz()[2] for point in p]
    return (f+"=curve3(%s,%s,%s)\n")%(n,conv(xs),conv(ys),conv(zs))
def triangulate2(points):
    """Split a polygon into triangles.

    3- and 4-gons are handled directly; larger polygons are decomposed
    through the Polygon package's triangle strips.
    """
    if len(points) == 3:
        return (points,)
    if len(points) == 4:
        a, b, c, d = points
        return ((a, b, c), (a, c, d))
    import Polygon
    strips = Polygon.TriStrip(Polygon.Polygon(points))
    triangles = []
    # Each strip of k vertices yields k-2 triangles over a sliding window.
    for strip in strips:
        for k in range(len(strip) - 2):
            triangles.append((strip[k], strip[k+1], strip[k+2]))
    return triangles
def triangulate(points):
    """Triangulate a 2D polygon via the bundled poly module."""
    from poly import poly
    return poly.triangulate(points)
def crossproduct(a,b):
    """Cross product a x b of two 3-vectors, returned as a tuple."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return (ay*bz - az*by, az*bx - ax*bz, ax*by - ay*bx)
def norm2(x):
    """Euclidean length of a 3-vector."""
    import math
    squared = x[0]**2 + x[1]**2 + x[2]**2
    return math.sqrt(squared)
def norm(x):
    """Return x scaled to unit length (as a list)."""
    length = norm2(x)
    return [component / length for component in x]
def vec(a,b):
    """Component-wise difference a - b (as a list; truncates to the
    shorter input)."""
    return [p - q for p, q in zip(a, b)]
def getnormal(a,b,c):
    """Unit normal of the triangle (a, b, c): normalized (a-b) x (c-a)."""
    u = [p - q for p, q in zip(a, b)]
    w = [p - q for p, q in zip(c, a)]
    return norm(crossproduct(u, w))
def tri3d(points):
    """Triangulate a planar polygon given by 3D points.

    The polygon is projected onto the 2D coordinate plane most
    orthogonal to its normal, triangulated there, and the triangles are
    lifted back onto the original plane.  Returns a list of triangles,
    each a list of three [x, y, z] points.
    """
    n=getnormal(points[0],points[1],points[2])
    p=points[0]
    d=-(n[0]*p[0]+n[1]*p[1]+n[2]*p[2])   # plane equation: n.p + d = 0
    #print n,d
    # Sanity check: every input point must lie on that plane.
    for p in points:
        f=n[0]*p[0]+n[1]*p[1]+n[2]*p[2]+d
        assert abs(f)<1e-8
    # z: index of the largest |normal| component (the axis to drop);
    # x, y: the remaining two axes used for the 2D projection.
    z=0
    if abs(n[1])>abs(n[z]):z=1
    if abs(n[2])>abs(n[z]):z=2
    x=0
    if x==z: x=1
    y=1
    if y==x or y==z: y=2
    p2=[(p[x],p[y]) for p in points]
    t=triangulate(p2)
    result=[]
    for i in range(len(t)):
        pp=[]
        for j in range(3):
            p=[0,0,0]
            p[x]=t[i][j][0]
            p[y]=t[i][j][1]
            # Recover the dropped coordinate from the plane equation.
            p[z]=-(n[x]*p[x]+n[y]*p[y]+d)/n[z]
            pp.append(p)
        result.append(pp)
    return result
def triangulate_old(p):
    """Fan-triangulate polygon p from its first vertex."""
    return [(p[0], p[i], p[i+1]) for i in range(1, len(p) - 1)]
def femlabsurface(f,n,points):
    """Emit FEMLAB commands building surface number n bounded by `points`.

    The polygon is triangulated first (FEMLAB's face3 is unreliable for
    some inputs); a multi-triangle surface is reassembled into one face
    with geomcoerce.  Each element of `points` must provide getxyz().
    """
    s=""
    x=f+"=geomcoerce('face',{"   # only used when there are >1 triangles
    p=[y.getxyz() for y in points]
    # print n,":",len(p)
    tri=tri3d(p)
    for i,t in enumerate(tri):
        if len(tri)==1:
            ff=f                 # single triangle: keep the target name
        else:
            ff=f+"P%d"%i         # partial-surface name, collected below
            x+=f+"P%d,"%i
        s+=femlabsurface3(ff,n,t)
    if len(tri)>1:
        # Close the brace list and fill every %s slot with n.
        x=x[:-1]+"})\n"
        s+=x%tuple([n]*(len(tri)+1))
    return s
def write_femlab(g,filename, export0D=False, export1D=False, export2D=False,
        export3D=False):
    """Export geometry ``g`` as a COMSOL/FEMLAB MATLAB script to ``filename``.

    The exportXD flags select which dimensions of the geometry are
    written; when no flag is set, only the 3D volumes are exported.
    """
    # NOTE(review): 'export3D' is tested twice below; the first test was
    # probably meant to be 'export0D' -- confirm before relying on this
    # default-selection logic.
    if not export1D and not export2D and not export3D and not export3D:
        export3D=True
    # MATLAB boilerplate emitted verbatim before the geometry commands.
    head="""\
flclear fem
% COMSOL version
clear vrsn
vrsn.name = 'COMSOL 3.2';
vrsn.ext = '';
vrsn.major = 0;
vrsn.build = 222;
vrsn.rcs = '$Name: $';
vrsn.date = '$Date: 2005/09/01 18:02:30 $';
fem.version = vrsn;
"""
    # Application-mode boilerplate (conductive media DC) appended after
    # the geometry commands.
    tail="""
clear appl
appl.mode.class = 'ConductiveMediaDC';
appl.shape = {};
appl.gporder = {};
appl.cporder = {};
appl.sshape = 2;
appl.assignsuffix = '_dc';
clear pnt
pnt.V0 = {};
pnt.type = {};
pnt.Qj0 = {};
pnt.name = {};
pnt.ind = [];
appl.pnt = pnt;
clear edg
edg.Qlj = {};
edg.name = {};
edg.ind = [];
appl.edg = edg;
clear bnd
bnd.Vref = {};
bnd.sigmabnd = {};
bnd.V0 = {};
bnd.Jn = {};
bnd.type = {};
bnd.dbnd = {};
bnd.J0 = {};
bnd.name = {};
bnd.ind = [];
appl.bnd = bnd;
clear equ
equ.init = {};
equ.cporder = {};
equ.T0 = {};
equ.res0 = {};
equ.gporder = {};
equ.Qj = {};
equ.sigma = {};
equ.usage = {};
equ.T = {};
equ.name = {};
equ.Je = {};
equ.sigmatensor = {};
equ.sigtype = {};
equ.alpha = {};
equ.ind = [];
appl.equ = equ;
fem.appl{1} = appl;
fem.sdim = {'x','y','z'};
fem.border = 1;
fem.units = 'SI';
% Multiphysics
fem=multiphysics(fem);
"""
    s=""
    objs=[]    # FEMLAB object names, collected for the draw structure
    if export0D:
        for x in g.d0.values():
            assert isinstance(x,geom.point)
            s+="p%d=point3(%s)\n"%(x.getn(),x.getstr())
            objs.append("p%d"%x.getn())
    if export1D:
        for x in g.d1.values():
            if isinstance(x,geom.line):
                p=x.getpoints()
                s+=femlabline("l%s",x.getn(),p)
            elif isinstance(x,geom.circle):
                p=x.getpoints()
                assert len(p)==3
                # Only the end points are used; the arc is dropped here.
                s+=femlabline("l%s",x.getn(),(p[0],p[2]))
            elif isinstance(x,geom.lineloop):
                continue
            else:
                print "Warning: unknown element ",type(x)
                continue
            objs.append("l%d"%x.getn())
    if export2D or export3D:
        # Surfaces are always emitted for 3D export (the volumes below
        # are coerced from them), but they are only listed as drawable
        # objects when 2D export was requested.
        for x in g.d2.values():
            if isinstance(x,geom.planesurface):
                p=x.getpoints()
                s+=femlabsurface("f%s",x.getn(),p);
            elif isinstance(x,geom.ruledsurface):
                p=x.getpoints()
                s+=femlabsurface("f%s",x.getn(),p);
            elif isinstance(x,geom.surfaceloop):
                continue
            else:
                print "Warning: unknown element ",type(x)
                continue
            if export2D:
                objs.append("f%s"%x.getn())
    if export3D:
        for x in g.d3.values():
            if isinstance(x,geom.volume):
                # Coerce the bounding faces into one solid object.
                p=x.getsurfaces()
                s+="s%s"%x.getn()+"=geomcoerce('solid',{"
                for y in p:
                    s+="f%s,"%y.getn()
                s=s[:-1]+"})\n"
            else:
                print "Warning: unknown element ",type(x)
                continue
            objs.append("s%s"%x.getn())
    s+="clear s\ns.objs={%s};\ns.name={%s};\nfem.draw=struct('s',s);\n"%\
            (conv2(objs),conv3(objs))
    s= head+s+tail
    # NOTE(review): the file handle is not closed explicitly; relies on
    # garbage collection.
    open(filename,"w").write(s)
| Python |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# progressbar - Text progressbar library for python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Text progressbar library for python.
This library provides a text mode progressbar. This is typically used
to display the progress of a long running operation, providing a
visual clue that processing is underway.
The ProgressBar class manages the progress, and the format of the line
is given by a number of widgets. A widget is an object that may
display differently depending on the state of the progress. There are
three types of widget:
- a string, which always shows itself;
- a ProgressBarWidget, which may return a different value every time
its update method is called; and
- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
expands to fill the remaining width of the line.
The progressbar module is very easy to use, yet very powerful. And
automatically supports features like auto-resizing when available.
"""
__author__ = "Nilton Volpato"
__author_email__ = "first-name dot last-name @ gmail.com"
__date__ = "2006-05-07"
__version__ = "2.2"
# Changelog
#
# 2006-05-07: v2.2 fixed bug in windows
# 2005-12-04: v2.1 autodetect terminal width, added start method
# 2005-12-04: v2.0 everything is now a widget (wow!)
# 2005-12-03: v1.0 rewrite using widgets
# 2005-06-02: v0.5 rewrite
# 2004-??-??: v0.1 first version
import sys, time
from array import array
try:
from fcntl import ioctl
import termios
except ImportError:
pass
import signal
class ProgressBarWidget(object):
    """Base class for fixed-content ProgressBar widgets.

    The owning ProgressBar calls update() whenever the line is redrawn
    and inserts the returned string.  The width may vary a little
    between calls, but drastic, repeated changes render poorly.
    """
    def update(self, pbar):
        """Return the string for this widget.

        pbar is the calling ProgressBar, whose attributes describe the
        current progress state.  Subclasses must override this method.
        """
        pass
class ProgressBarWidgetHFill(object):
    """Base class for variable-width ProgressBar widgets.

    Like TeX's \\hfill, an HFill widget expands to fill the line; when
    several are used on one line they share the remaining width
    equally.  The ProgressBar passes the width the widget must fill.
    """
    def update(self, pbar, width):
        """Return the string for this widget, exactly `width` columns
        wide.  pbar is the calling ProgressBar.  Subclasses must
        override this method."""
        pass
class ETA(ProgressBarWidget):
    "Widget for the Estimated Time of Arrival"
    def format_time(self, seconds):
        # Render a duration in seconds as HH:MM:SS.
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    def update(self, pbar):
        # Nothing to extrapolate from before the first tick.
        if pbar.currval == 0:
            return 'ETA: --:--:--'
        if pbar.finished:
            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
        spent = pbar.seconds_elapsed
        # Linear extrapolation: total = spent * maxval / currval.
        return 'ETA: %s' % self.format_time(spent * pbar.maxval / pbar.currval - spent)
class FileTransferSpeed(ProgressBarWidget):
    "Widget for showing the transfer speed (useful for file transfers)."
    def __init__(self):
        self.fmt = '%6.2f %s'
        self.units = ['B','K','M','G','T','P']
    def update(self, pbar):
        # Guard against division by (almost) zero right after start-up.
        if pbar.seconds_elapsed < 2e-6:
            rate = 0.0
        else:
            rate = float(pbar.currval) / pbar.seconds_elapsed
        # Scale into the largest unit that keeps the value below 1000.
        scaled = rate
        for unit in self.units:
            if scaled < 1000:
                break
            scaled /= 1000
        return self.fmt % (scaled, unit + '/s')
class RotatingMarker(ProgressBarWidget):
    "A rotating marker for filling the bar of progress."
    def __init__(self, markers='|/-\\'):
        self.markers = markers
        self.curmark = -1    # advanced before first use, so start at -1
    def update(self, pbar):
        if pbar.finished:
            return self.markers[0]
        position = (self.curmark + 1) % len(self.markers)
        self.curmark = position
        return self.markers[position]
class Percentage(ProgressBarWidget):
    "Just the percentage done."
    def update(self, pbar):
        # Fixed 3-column integer percentage followed by a literal '%'.
        return '%3d' % pbar.percentage() + '%'
class Bar(ProgressBarWidgetHFill):
    "The bar of progress. It will stretch to fill the line."
    def __init__(self, marker='#', left='|', right='|'):
        self.marker = marker
        self.left = left
        self.right = right
    def _format_marker(self, pbar):
        # The marker is either a plain string or another widget.
        if isinstance(self.marker, (str, unicode)):
            return self.marker
        return self.marker.update(pbar)
    def update(self, pbar, width):
        inner = width - len(self.left) - len(self.right)
        filled = int(pbar.percentage() * inner / 100)
        mark = self._format_marker(pbar)
        # Left-justify: the bar grows from the left edge.
        return self.left + (mark * filled).ljust(inner) + self.right
class ReverseBar(Bar):
    "The reverse bar of progress, or bar of regress. :)"
    def update(self, pbar, width):
        inner = width - len(self.left) - len(self.right)
        filled = int(pbar.percentage() * inner / 100)
        mark = self._format_marker(pbar)
        # rjust instead of ljust: this bar grows from the right edge.
        return self.left + (mark * filled).rjust(inner) + self.right
# Layout used when a ProgressBar is constructed without an explicit
# widgets list: "NN% |####    |".
default_widgets = [Percentage(), ' ', Bar()]
class ProgressBar(object):
    """This is the ProgressBar class, it updates and prints the bar.

    The term_width parameter may be an integer. Or None, in which case
    it will try to guess it, if it fails it will default to 80 columns.

    The simple use is like this:
    >>> pbar = ProgressBar().start()
    >>> for i in xrange(100):
    ...    # do something
    ...    pbar.update(i+1)
    ...
    >>> pbar.finish()

    But anything you want to do is possible (well, almost anything).
    You can supply different widgets of any type in any order. And you
    can even write your own widgets! There are many widgets already
    shipped and you should experiment with them.

    When implementing a widget update method you may access any
    attribute or function of the ProgressBar object calling the
    widget's update method. The most important attributes you would
    like to access are:
    - currval: current value of the progress, 0 <= currval <= maxval
    - maxval: maximum (and final) value of the progress
    - finished: True if the bar has finished (reached 100%), False o/w
    - start_time: first time update() method of ProgressBar was called
    - seconds_elapsed: seconds elapsed since start_time
    - percentage(): percentage of the progress (this is a method)
    """
    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
                 fd=sys.stderr):
        # NOTE(review): 'widgets' defaults to the shared module-level
        # list; mutating it on one bar would affect every other bar.
        assert maxval > 0
        self.maxval = maxval
        self.widgets = widgets
        self.fd = fd
        self.signal_set = False
        if term_width is None:
            try:
                # Probe the terminal size now and track future resizes.
                self.handle_resize(None,None)
                signal.signal(signal.SIGWINCH, self.handle_resize)
                self.signal_set = True
            except:
                # ioctl/termios unavailable (not a tty, Windows, ...):
                # fall back to a fixed width.
                self.term_width = 79
        else:
            self.term_width = term_width
        self.currval = 0
        self.finished = False
        self.prev_percentage = -1    # forces a redraw on the first update
        self.start_time = None
        self.seconds_elapsed = 0
    def handle_resize(self, signum, frame):
        # TIOCGWINSZ returns rows and columns packed as shorts; keep
        # only the width.
        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
        self.term_width = w
    def percentage(self):
        "Returns the percentage of the progress."
        return self.currval*100.0 / self.maxval
    def _format_widgets(self):
        # First pass renders fixed-size widgets and measures their
        # width; the second distributes the remaining columns evenly
        # among the HFill widgets.
        r = []
        hfill_inds = []
        num_hfill = 0
        currwidth = 0
        for i, w in enumerate(self.widgets):
            if isinstance(w, ProgressBarWidgetHFill):
                r.append(w)          # placeholder, rendered in pass two
                hfill_inds.append(i)
                num_hfill += 1
            elif isinstance(w, (str, unicode)):
                r.append(w)
                currwidth += len(w)
            else:
                weval = w.update(self)
                currwidth += len(weval)
                r.append(weval)
        for iw in hfill_inds:
            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
        return r
    def _format_line(self):
        return ''.join(self._format_widgets()).ljust(self.term_width)
    def _need_update(self):
        # Redraw only when the integer percentage actually changed.
        return int(self.percentage()) != int(self.prev_percentage)
    def update(self, value):
        "Updates the progress bar to a new value."
        assert 0 <= value <= self.maxval
        self.currval = value
        if not self._need_update() or self.finished:
            return
        if not self.start_time:
            self.start_time = time.time()
        self.seconds_elapsed = time.time() - self.start_time
        self.prev_percentage = self.percentage()
        if value != self.maxval:
            self.fd.write(self._format_line() + '\r')   # redraw in place
        else:
            self.finished = True
            self.fd.write(self._format_line() + '\n')   # final line
    def start(self):
        """Start measuring time, and prints the bar at 0%.

        It returns self so you can use it like this:
        >>> pbar = ProgressBar().start()
        >>> for i in xrange(100):
        ...    # do something
        ...    pbar.update(i+1)
        ...
        >>> pbar.finish()
        """
        self.update(0)
        return self
    def finish(self):
        """Used to tell the progress is finished."""
        self.update(self.maxval)
        if self.signal_set:
            # Restore the default SIGWINCH handler we replaced.
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
class MyBar:
    """Encapsulation of a nice progress bar"""
    def __init__(self,text):
        # Label shown in front of the bar; the bar itself is built
        # lazily in init() once the maximum value is known.
        self.text = text
    def init(self,max):
        bar_widgets = [self.text, ' ', Percentage(), ' ', Bar(), ' ', ETA()]
        self.pbar = ProgressBar(widgets=bar_widgets, maxval=max).start()
    def update(self,i):
        self.pbar.update(i)
if __name__=='__main__':
    # Demonstrations, executed only when the module is run directly.
    import os
    def example1():
        # Rotating marker plus ETA and transfer-speed widgets.
        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print
    def example2():
        # A custom widget subclass may change its output mid-run.
        class CrazyFileTransferSpeed(FileTransferSpeed):
            "It's bigger between 45 and 80 percent"
            def update(self, pbar):
                if 45 < pbar.percentage() < 80:
                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
                else:
                    return FileTransferSpeed.update(self,pbar)
        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000)
        # maybe do something
        pbar.start()
        for i in range(2000000):
            # do something
            pbar.update(5*i+1)
        pbar.finish()
        print
    def example3():
        # Two mirrored bars filling from opposite ends of the line.
        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print
    def example4():
        # Custom bar characters with a slow, stepped update.
        widgets = ['Test: ', Percentage(), ' ',
                   Bar(marker='0',left='[',right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=500)
        pbar.start()
        for i in range(100,500+1,50):
            time.sleep(0.2)
            pbar.update(i)
        pbar.finish()
        print
    example1()
    example2()
    example3()
    example4()
| Python |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# progressbar - Text progressbar library for python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Text progressbar library for python.
This library provides a text mode progressbar. This is typically used
to display the progress of a long running operation, providing a
visual clue that processing is underway.
The ProgressBar class manages the progress, and the format of the line
is given by a number of widgets. A widget is an object that may
display differently depending on the state of the progress. There are
three types of widget:
- a string, which always shows itself;
- a ProgressBarWidget, which may return a different value every time
its update method is called; and
- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
expands to fill the remaining width of the line.
The progressbar module is very easy to use, yet very powerful. And
automatically supports features like auto-resizing when available.
"""
__author__ = "Nilton Volpato"
__author_email__ = "first-name dot last-name @ gmail.com"
__date__ = "2006-05-07"
__version__ = "2.2"
# Changelog
#
# 2006-05-07: v2.2 fixed bug in windows
# 2005-12-04: v2.1 autodetect terminal width, added start method
# 2005-12-04: v2.0 everything is now a widget (wow!)
# 2005-12-03: v1.0 rewrite using widgets
# 2005-06-02: v0.5 rewrite
# 2004-??-??: v0.1 first version
import sys, time
from array import array
try:
from fcntl import ioctl
import termios
except ImportError:
pass
import signal
class ProgressBarWidget(object):
    """Base class for fixed-content ProgressBar widgets.

    The owning ProgressBar calls update() whenever the line is redrawn
    and inserts the returned string.  The width may vary a little
    between calls, but drastic, repeated changes render poorly.
    """
    def update(self, pbar):
        """Return the string for this widget.

        pbar is the calling ProgressBar, whose attributes describe the
        current progress state.  Subclasses must override this method.
        """
        pass
class ProgressBarWidgetHFill(object):
    """Base class for variable-width ProgressBar widgets.

    Like TeX's \\hfill, an HFill widget expands to fill the line; when
    several are used on one line they share the remaining width
    equally.  The ProgressBar passes the width the widget must fill.
    """
    def update(self, pbar, width):
        """Return the string for this widget, exactly `width` columns
        wide.  pbar is the calling ProgressBar.  Subclasses must
        override this method."""
        pass
class ETA(ProgressBarWidget):
    "Widget for the Estimated Time of Arrival"
    def format_time(self, seconds):
        # Render a duration in seconds as HH:MM:SS.
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    def update(self, pbar):
        # Nothing to extrapolate from before the first tick.
        if pbar.currval == 0:
            return 'ETA: --:--:--'
        if pbar.finished:
            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
        spent = pbar.seconds_elapsed
        # Linear extrapolation: total = spent * maxval / currval.
        return 'ETA: %s' % self.format_time(spent * pbar.maxval / pbar.currval - spent)
class FileTransferSpeed(ProgressBarWidget):
    "Widget for showing the transfer speed (useful for file transfers)."
    def __init__(self):
        self.fmt = '%6.2f %s'
        self.units = ['B','K','M','G','T','P']
    def update(self, pbar):
        # Guard against division by (almost) zero right after start-up.
        if pbar.seconds_elapsed < 2e-6:
            rate = 0.0
        else:
            rate = float(pbar.currval) / pbar.seconds_elapsed
        # Scale into the largest unit that keeps the value below 1000.
        scaled = rate
        for unit in self.units:
            if scaled < 1000:
                break
            scaled /= 1000
        return self.fmt % (scaled, unit + '/s')
class RotatingMarker(ProgressBarWidget):
    "A rotating marker for filling the bar of progress."
    def __init__(self, markers='|/-\\'):
        self.markers = markers
        self.curmark = -1    # advanced before first use, so start at -1
    def update(self, pbar):
        if pbar.finished:
            return self.markers[0]
        position = (self.curmark + 1) % len(self.markers)
        self.curmark = position
        return self.markers[position]
class Percentage(ProgressBarWidget):
    "Just the percentage done."
    def update(self, pbar):
        # Fixed 3-column integer percentage followed by a literal '%'.
        return '%3d' % pbar.percentage() + '%'
class Bar(ProgressBarWidgetHFill):
    "The bar of progress. It will stretch to fill the line."
    def __init__(self, marker='#', left='|', right='|'):
        self.marker = marker
        self.left = left
        self.right = right
    def _format_marker(self, pbar):
        # The marker is either a plain string or another widget.
        if isinstance(self.marker, (str, unicode)):
            return self.marker
        return self.marker.update(pbar)
    def update(self, pbar, width):
        inner = width - len(self.left) - len(self.right)
        filled = int(pbar.percentage() * inner / 100)
        mark = self._format_marker(pbar)
        # Left-justify: the bar grows from the left edge.
        return self.left + (mark * filled).ljust(inner) + self.right
class ReverseBar(Bar):
    "The reverse bar of progress, or bar of regress. :)"
    def update(self, pbar, width):
        inner = width - len(self.left) - len(self.right)
        filled = int(pbar.percentage() * inner / 100)
        mark = self._format_marker(pbar)
        # rjust instead of ljust: this bar grows from the right edge.
        return self.left + (mark * filled).rjust(inner) + self.right
# Layout used when a ProgressBar is constructed without an explicit
# widgets list: "NN% |####    |".
default_widgets = [Percentage(), ' ', Bar()]
class ProgressBar(object):
    """This is the ProgressBar class, it updates and prints the bar.

    The term_width parameter may be an integer. Or None, in which case
    it will try to guess it, if it fails it will default to 80 columns.

    The simple use is like this:
    >>> pbar = ProgressBar().start()
    >>> for i in xrange(100):
    ...    # do something
    ...    pbar.update(i+1)
    ...
    >>> pbar.finish()

    But anything you want to do is possible (well, almost anything).
    You can supply different widgets of any type in any order. And you
    can even write your own widgets! There are many widgets already
    shipped and you should experiment with them.

    When implementing a widget update method you may access any
    attribute or function of the ProgressBar object calling the
    widget's update method. The most important attributes you would
    like to access are:
    - currval: current value of the progress, 0 <= currval <= maxval
    - maxval: maximum (and final) value of the progress
    - finished: True if the bar has finished (reached 100%), False o/w
    - start_time: first time update() method of ProgressBar was called
    - seconds_elapsed: seconds elapsed since start_time
    - percentage(): percentage of the progress (this is a method)
    """
    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
                 fd=sys.stderr):
        # NOTE(review): 'widgets' defaults to the shared module-level
        # list; mutating it on one bar would affect every other bar.
        assert maxval > 0
        self.maxval = maxval
        self.widgets = widgets
        self.fd = fd
        self.signal_set = False
        if term_width is None:
            try:
                # Probe the terminal size now and track future resizes.
                self.handle_resize(None,None)
                signal.signal(signal.SIGWINCH, self.handle_resize)
                self.signal_set = True
            except:
                # ioctl/termios unavailable (not a tty, Windows, ...):
                # fall back to a fixed width.
                self.term_width = 79
        else:
            self.term_width = term_width
        self.currval = 0
        self.finished = False
        self.prev_percentage = -1    # forces a redraw on the first update
        self.start_time = None
        self.seconds_elapsed = 0
    def handle_resize(self, signum, frame):
        # TIOCGWINSZ returns rows and columns packed as shorts; keep
        # only the width.
        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
        self.term_width = w
    def percentage(self):
        "Returns the percentage of the progress."
        return self.currval*100.0 / self.maxval
    def _format_widgets(self):
        # First pass renders fixed-size widgets and measures their
        # width; the second distributes the remaining columns evenly
        # among the HFill widgets.
        r = []
        hfill_inds = []
        num_hfill = 0
        currwidth = 0
        for i, w in enumerate(self.widgets):
            if isinstance(w, ProgressBarWidgetHFill):
                r.append(w)          # placeholder, rendered in pass two
                hfill_inds.append(i)
                num_hfill += 1
            elif isinstance(w, (str, unicode)):
                r.append(w)
                currwidth += len(w)
            else:
                weval = w.update(self)
                currwidth += len(weval)
                r.append(weval)
        for iw in hfill_inds:
            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
        return r
    def _format_line(self):
        return ''.join(self._format_widgets()).ljust(self.term_width)
    def _need_update(self):
        # Redraw only when the integer percentage actually changed.
        return int(self.percentage()) != int(self.prev_percentage)
    def update(self, value):
        "Updates the progress bar to a new value."
        assert 0 <= value <= self.maxval
        self.currval = value
        if not self._need_update() or self.finished:
            return
        if not self.start_time:
            self.start_time = time.time()
        self.seconds_elapsed = time.time() - self.start_time
        self.prev_percentage = self.percentage()
        if value != self.maxval:
            self.fd.write(self._format_line() + '\r')   # redraw in place
        else:
            self.finished = True
            self.fd.write(self._format_line() + '\n')   # final line
    def start(self):
        """Start measuring time, and prints the bar at 0%.

        It returns self so you can use it like this:
        >>> pbar = ProgressBar().start()
        >>> for i in xrange(100):
        ...    # do something
        ...    pbar.update(i+1)
        ...
        >>> pbar.finish()
        """
        self.update(0)
        return self
    def finish(self):
        """Used to tell the progress is finished."""
        self.update(self.maxval)
        if self.signal_set:
            # Restore the default SIGWINCH handler we replaced.
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
class MyBar:
    """A thin convenience wrapper: a ProgressBar with a fixed layout
    (label, percentage, bar, ETA)."""
    def __init__(self,text):
        # Label shown in front of the bar.
        self.text=text
    def init(self,max):
        # Build the bar lazily so one MyBar can label several runs.
        layout = [self.text, ' ', Percentage(), ' ', Bar(), ' ', ETA()]
        self.pbar = ProgressBar(widgets=layout, maxval=max).start()
    def update(self,i):
        self.pbar.update(i)
if __name__=='__main__':
    import os

    # Demo suite: each example shows a different widget combination.
    def example1():
        # Percentage, rotating marker, ETA and transfer speed.
        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print

    def example2():
        # A custom widget: subclass an existing one and tweak its output.
        class CrazyFileTransferSpeed(FileTransferSpeed):
            "It's bigger between 45 and 80 percent"
            def update(self, pbar):
                if 45 < pbar.percentage() < 80:
                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
                else:
                    return FileTransferSpeed.update(self,pbar)
        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000)
        # maybe do something
        pbar.start()
        for i in range(2000000):
            # do something
            pbar.update(5*i+1)
        pbar.finish()
        print

    def example3():
        # Two bars growing towards each other.
        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print

    def example4():
        # Sparse, slow updates with custom marker and brackets.
        widgets = ['Test: ', Percentage(), ' ',
                   Bar(marker='0',left='[',right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=500)
        pbar.start()
        for i in range(100,500+1,50):
            time.sleep(0.2)
            pbar.update(i)
        pbar.finish()
        print

    example1()
    example2()
    example3()
    example4()
| Python |
import math
import geometry as geom
from meshutils import mesh
import progressbar
def numlist2str(x):
    """Return the numbers in *x* as one space-separated string.

    Example: [1, 2, 3] -> "1 2 3"; an empty sequence yields "".
    """
    # str.join is linear, unlike the original quadratic += building,
    # and needs no trailing-separator trimming.
    return " ".join("%d" % i for i in x)
def getinsidepoint(pts):
    """Return a point just inside the triangle spanned by pts[0..2].

    Takes a tiny step from the first corner towards the centroid.
    """
    centroid = (pts[0] + pts[1] + pts[2]) / 3
    step = centroid - pts[0]
    return pts[0] + 0.001 * step
def write_tetgen(g, filename):
    """Write geometry *g* to *filename* in tetgen's .poly format.

    The file lists the nodes, the facets (outer polygon, hole polygons
    and one point inside each hole), the volume holes (none) and the
    regions (one inside point + region number per physical volume).
    """
    g.leaveonlyphysicalvolumes()
    # nodes: map geometry point numbers to 1-based tetgen node numbers
    nodes = []
    nodenum = {}  # renamed from "map" to stop shadowing the builtin
    for x in g.d0.values():
        assert isinstance(x, geom.point)
        nodes.append(x.getxyz())
        nodenum[x.getn()] = len(nodes)
    s = "%d 3\n" % len(nodes)
    for n, x in enumerate(nodes):
        s += "%d %f %f %f\n" % tuple([n+1] + list(x))
    # facets
    # first write external polygon, then hole polygons and then a point in
    # each hole polygon
    facets = []
    for x in g.d2.values():
        assert isinstance(x, geom.surface)
        p = [nodenum[y.getn()] for y in x.getpoints()]
        h = []
        pts = []
        for hole in x.getholepoints():
            h.append([nodenum[y.getn()] for y in hole])
            pts.append(getinsidepoint(hole).getxyz())
        bc = g.getBCnum(x.getn())
        facets.append((p, bc, h, pts))
    # number of facets, boundary markers=yes
    s += "\n%d 1\n" % len(facets)
    for p, bc, h, holes in facets:
        # number of polygons, number of holes, boundary marker
        s += "%d %d %d\n" % (1+len(h), len(h), bc)
        # number of corners, corner 1, corner 2, ...
        s += "%d %s\n" % (len(p), numlist2str(p))
        for x in h:
            # number of corners, corner 1, corner 2, ...
            s += "%d %s\n" % (len(x), numlist2str(x))
        for i, pt in enumerate(holes):
            # hole number, x, y, z
            s += "%d %f %f %f\n" % (i+1, pt[0], pt[1], pt[2])
    # volume holes
    s += "\n0\n"
    # regions
    regions = []
    for x in g.phys3.values():
        assert isinstance(x, geom.physicalvolume)
        for v in x.getvolumes():
            regions.append(v.getinsidepoint().getxyz() + [x.getn()])
    s += "\n%d\n" % len(regions)
    for i, x in enumerate(regions):
        s += "%d %f %f %f %d\n" % (i+1, x[0], x[1], x[2], x[3])
    # Close the file deterministically instead of relying on refcounting.
    out = open(filename, "w")
    try:
        out.write(s)
    finally:
        out.close()
def read_tetgen(fname,verbose=True):
def getnodes(fnods,up):
f=file(fnods)
l=[int(x) for x in f.readline().split()]
npoints,dim,nattrib,nbound=l
assert dim==3
if verbose: up.init(npoints)
nodes=[]
for line in f:
if line[0]=="#": continue
l=[float(x) for x in line.split()]
l[0]=int(l[0])
nodes.append(tuple(l))
assert l[0]==len(nodes)
if verbose: up.update(l[0])
assert npoints==len(nodes)
return nodes
def getele(fele,up):
f=file(fele)
l=[int(x) for x in f.readline().split()]
ntetra,nnod,nattrib=l
#we have either linear or quadratic tetrahedra:
assert nnod in [4,10]
linear= (nnod==4)
if verbose: up.init(ntetra)
if nattrib!=1:
raise "tetgen didn't assign an entity number to each element \
(option -A)"
els=[]
regions={}
for line in f:
if line[0]=="#": continue
l=[int(x) for x in line.split()]
if linear:
assert len(l)-2 == 4
els.append((l[0],54,l[1],l[2],l[3],l[4]))
regionnum=l[5]
else:
assert len(l)-2 == 10
els.append((l[0],54,l[1],l[2],l[3],l[4],
l[5],l[6],l[7],l[8],l[9],l[10]))
regionnum=l[11]
if regionnum==0:
print "see %s, element # %d"%(fele,l[0])
raise "there are elements not belonging to any physical entity"
if regions.has_key(regionnum):
regions[regionnum].append(l[0])
else:
regions[regionnum]=[l[0]]
assert l[0]==len(els)
if verbose: up.update(l[0])
return els,regions,linear
def getBCfaces(ffaces,up):
f=file(ffaces)
l=[int(x) for x in f.readline().split()]
nfaces,nattrib=l
if nattrib!=1:
raise "tetgen didn't assign an entity number to each face \
(option -A)"
if verbose: up.init(nfaces)
faces={}
for line in f:
if line[0]=="#": continue
l=[int(x) for x in line.split()]
assert len(l)==5
regionnum=l[4]
if regionnum==0: continue
if faces.has_key(regionnum):
faces[regionnum].append((l[1],l[2],l[3]))
else:
faces[regionnum]=[(l[1],l[2],l[3])]
if verbose: up.update(l[0])
return faces
def calculatexyz(nodes, els):
"""Calculate the missing xyz values in place"""
def avg(i,j,n4,nodes):
a=nodes[n4[i-1]-1]
b=nodes[n4[j-1]-1]
return (a[1]+b[1])/2, (a[2]+b[2])/2, (a[3]+b[3])/2
def getxyz(i,n4,nodes):
if i+5==5: return avg(1,2,n4,nodes)
if i+5==6: return avg(2,3,n4,nodes)
if i+5==7: return avg(1,3,n4,nodes)
if i+5==8: return avg(1,4,n4,nodes)
if i+5==9: return avg(2,4,n4,nodes)
if i+5==10: return avg(3,4,n4,nodes)
raise "wrong topology"
for e in els:
n4=e[2:2+4]
n6=e[2+4:2+4+10]
for i,n in enumerate(n6):
x,y,z=getxyz(i,n4,nodes)
nodes[n-1]=(n,x,y,z)
if verbose: print "Reading mesh from tetgen..."
m=mesh()
m.nodes=getnodes(fname+".node",progressbar.MyBar(" nodes:"))
m.elements,m.regions, lin=getele(fname+".ele",
progressbar.MyBar(" elements:"))
if not lin:
#tetgen doesn't compute xyz coordinates of the aditional 6 nodes
#(only of the 4 corner nodes) in tetrahedra.
calculatexyz(m.nodes,m.elements)
m.faces=getBCfaces(fname+".face",progressbar.MyBar(" BC:"))
return m
def runtetgen(tetgenpath,filename,a=None,Q=None,quadratic=False,verbose=True,
refine=False):
"""Runs tetgen.
tetgenpath ... the tetgen executable with a full path
filename ... the input file for tetgen (for example /tmp/t.poly)
a ... a maximum tetrahedron volume constraint
Q ... a minimum radius-edge ratio, tetgen default is 2.0
quadratic ... False - generate linear elements, True - quadratic elements
"""
import pexpect
if not refine:
cmd = "%s -pQAq" % (tetgenpath)
else:
cmd = "%s -rQAq" % (tetgenpath)
if Q!=None:
cmd=cmd+"%f"%Q
if a!=None and not refine:
cmd=cmd+" -a%f"%(a)
if refine:
cmd=cmd+" -a"
if quadratic:
cmd=cmd+" -o2"
cmd=cmd+" %s"%(filename)
if verbose: print "Generating mesh using", cmd
p=pexpect.spawn(cmd,timeout=None)
if not refine:
p.expect("Opening %s."%(filename))
else:
p.expect("Opening %s.node.\r\n"%(filename))
p.expect("Opening %s.ele.\r\n"%(filename))
p.expect("Opening %s.face.\r\n"%(filename))
p.expect("Opening %s.vol."%(filename))
assert p.before==""
p.expect(pexpect.EOF)
if p.before!="\r\n":
print p.before
raise "Error when running tetgen (see above for output): %s"%cmd
| Python |
class geometry(object):
    """The geometry is given by a sets of points (d0), lines (d1), surfaces
    (d2) and volumes (d3). A lines are constructed from 2 points, a surface from
    any number of lines, a volume from any number of surfaces.
    Physical volumes are contruted from any number of volumes.
    The self.d0, self.d1, self.d2 and self.d3 are dictionaries holding a map
    geometry element number -> instance of point,line,surface of volume
    Example:
    ========
    To get all the points which define a surface 5, use:
    self.d2[5].getpoints()
    This would give you a list [..] of point() instances.
    """
    def __init__(self):
        # Entity dictionaries, keyed by entity number.
        self.d0={}
        self.d1={}
        self.d2={}
        self.d3={}
        self.phys2={}
        self.phys3={}
    def addpoint(self,n,p):
        "p=[x,y,z]"
        o=point(self,n,p)
        self.d0[o.getn()]=o
    def addline(self,n,l):
        "l=[p1,p2]"
        o=line(self,n,l)
        self.d1[o.getn()]=o
    def addsurface(self,n,s):
        "s=[l1,l2,l3,...]"
        o=surface(self,n,s)
        self.d2[o.getn()]=o
    def addvolume(self,n,v):
        "v=[s1,s2,s3,...]"
        o=volume(self,n,v)
        self.d3[o.getn()]=o
    def addphysicalsurface(self,n,surfacelist):
        "surfacelist=[v1,v2,v3,...]"
        o=physicalsurface(self,n,surfacelist)
        self.phys2[o.getn()]=o
    def addphysicalvolume(self,n,volumelist):
        "volumelist=[v1,v2,v3,...]"
        o=physicalvolume(self,n,volumelist)
        self.phys3[o.getn()]=o
    def getBCnum(self,snum):
        # Return the number of the physical surface (boundary condition)
        # containing surface *snum*, or 0 when it carries none.
        for x in self.phys2:
            if snum in self.phys2[x].surfaces:
                return x
        return 0
    def printinfo(self):
        # Dump entity counts and the physical groupings to stdout.
        print "General geometry information:"
        print "  points:",len(self.d0)
        print "  lines:",len(self.d1)
        print "  surfaces:",len(self.d2)
        print "  volumes:",len(self.d3)
        print "Physical entities:"
        print "  surfaces (boundary conditions):"
        for d in self.phys2.values():
            print "    %d: surface numbers %r"%(d.getn(),d.surfaces)
        print "  volumes (regions):"
        for d in self.phys3.values():
            print "    %d: volume numbers %r"%(d.getn(),d.volumes)
    def leaveonlyphysicalvolumes(self):
        # Prune the geometry: keep only the entities reachable from the
        # physical volumes (their volumes, surfaces, lines and points).
        points={}
        lines={}
        surfaces={}
        volumes={}
        for e in self.phys3:
            for v in self.phys3[e].getvolumes():
                volumes[v.getn()]=v
                for s in v.getsurfaces():
                    surfaces[s.getn()]=s
                    for l in s.getlines():
                        lines[l.getn()]=l
                        for p in l.getpoints():
                            points[p.getn()]=p
        self.d0=points
        self.d1=lines
        self.d2=surfaces
        self.d3=volumes
class geomobject(object):
    """Common base for geometry entities; each one knows its number."""
    def getn(self):
        """Return this entity's number."""
        return self.n
class point(geomobject):
    """A 3D point carrying a geometry back-reference and an entity number.

    Supports +, -, scalar * and scalar / so points can be combined
    arithmetically; derived points get the dummy entity number -1.
    """
    def __init__(self,g,n,p):
        "p=[x,y,z]"
        self.geom=g
        self.n=n
        self.p=p
    def __add__(self,p):
        return point(self.geom,-1,[a+b for a,b in zip(self.p,p.p)])
    def __sub__(self,p):
        return point(self.geom,-1,[a-b for a,b in zip(self.p,p.p)])
    def __div__(self,num):
        return point(self.geom,-1,[a/num for a in self.p])
    # Alias so scalar division also works under Python 3 semantics.
    __truediv__ = __div__
    def __mul__(self,num):
        return point(self.geom,-1,[a*num for a in self.p])
    def __rmul__(self,num):
        return self.__mul__(num)
    def getxyz(self):
        """Return the raw coordinate list [x, y, z]."""
        return self.p
    def getstr(self):
        """Return the coordinates formatted as "x, y, z".

        The coordinates are stored in a list, which must be converted
        to a tuple for %-formatting; the original passed the list
        directly, which raises TypeError.
        """
        return "%f, %f, %f"%tuple(self.getxyz())
class line(geomobject):
    """A line defined by the numbers of its two end points."""
    def __init__(self,g,n,l):
        "l=[p1,p2]"
        self.geom=g
        self.n=n
        self.points=l
    def getpoints(self):
        """Resolve the stored point numbers to point instances."""
        return [self.geom.d0[num] for num in self.points]
class surface(geomobject):
    """A surface bounded by lines; it may contain one hole polygon.

    Stored line numbers may be negative, which means the line is
    traversed in the opposite direction.
    """
    def __init__(self,g,n,s):
        self.geom=g
        self.n=n
        self.lines,self.holes=self.separate(s)
    def separate(self,s):
        #FIXME - this is just a quick hack to satisfy all the examples
        if len(s)<=4:
            return s,[]
        elif len(s)==8:
            return s[:4],[s[4:]]
        else:
            return s,[]
    def _startpoint(self, idx):
        """Return the first point of line *idx* w.r.t. its orientation."""
        if idx>0:
            return self.geom.d1[idx].getpoints()[0]
        return self.geom.d1[-idx].getpoints()[1]
    def getlines(self):
        return [self.geom.d1[abs(num)] for num in self.lines]
    def getpoints(self):
        #self.lines contains the (signed) numbers of all boundary lines
        return [self._startpoint(num) for num in self.lines]
    def getholepoints(self):
        # Same resolution as getpoints(), applied to each hole polygon.
        return [[self._startpoint(num) for num in hole]
                for hole in self.holes]
    def getinsidepoint(self):
        """Return the centroid of the first three boundary points."""
        corner=self.getpoints()[:3]
        return (corner[0]+corner[1]+corner[2])/3
class volume(geomobject):
    """A volume bounded by surfaces (stored by their numbers)."""
    def __init__(self,g,n,v):
        self.geom=g
        self.n=n
        self.surfaces=v
    def getsurfaces(self):
        return [self.geom.d2[abs(num)] for num in self.surfaces]
    def getinsidepoint(self):
        """Return a point strictly inside the volume.

        Averages the inside points of three bounding surfaces and steps
        slightly from a corner towards that centroid.
        """
        first_three=self.getsurfaces()[:3]
        centers=[s.getinsidepoint() for s in first_three]
        p0=first_three[0].getpoints()[0]
        direction=(centers[0]+centers[1]+centers[2])/3-p0
        return p0+0.001*direction
class physicalsurface(geomobject):
    """A named group of surfaces, used for boundary conditions."""
    def __init__(self,g,n,s):
        "s=[s1,s2,s3,...]"
        self.geom=g
        self.n=n
        self.surfaces=s
    def getsurfaces(self):
        return [self.geom.d2[num] for num in self.surfaces]
class physicalvolume(geomobject):
    """A named group of volumes, i.e. a material region."""
    def __init__(self,g,n,v):
        "v=[v1,v2,v3,...]"
        self.geom=g
        self.n=n
        self.volumes=v
    def getvolumes(self):
        return [self.geom.d3[num] for num in self.volumes]
| Python |
from gmsh import read_gmsh
from femlab import write_femlab
from tetgen import read_tetgen,write_tetgen,runtetgen
| Python |
#! /usr/bin/env python
#
# SCons - a Software Constructor
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/script/sconsign.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__version__ = "2.2.0"
__build__ = "issue-2856:2676:d23b7a2f45e8[MODIFIED]"
__buildsys__ = "oberbrunner-dev"
__date__ = "2012/08/05 15:38:28"
__developer__ = "garyo"
import os
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# Strip the script directory from sys.path() so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package. Replace it with our own library directories
# (version-specific first, in case they installed by hand there,
# followed by generic) so we pick up the right version of the build
# engine modules if they're in either directory.
# Build the list of candidate library directories (libs) from which the
# SCons build-engine modules will be imported, in decreasing priority.
script_dir = sys.path[0]
if script_dir in sys.path:
    sys.path.remove(script_dir)
libs = []
# An explicit SCONS_LIB_DIR environment setting always wins.
if "SCONS_LIB_DIR" in os.environ:
    libs.append(os.environ["SCONS_LIB_DIR"])
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
    local_version = os.path.join(script_dir, local_version)
    local = os.path.join(script_dir, local)
libs.append(os.path.abspath(local_version))
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
# preferred order of scons lookup paths
prefs = []
try:
    import pkg_resources
except ImportError:
    pass
else:
    # when running from an egg add the egg's directory
    try:
        d = pkg_resources.get_distribution('scons')
    except pkg_resources.DistributionNotFound:
        pass
    else:
        prefs.append(d.location)
if sys.platform == 'win32':
    # sys.prefix is (likely) C:\Python*;
    # check only C:\Python*.
    prefs.append(sys.prefix)
    prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
    # On other (POSIX) platforms, things are more complicated due to
    # the variety of path names and library locations. Try to be smart
    # about it.
    if script_dir == 'bin':
        # script_dir is `pwd`/bin;
        # check `pwd`/lib/scons*.
        prefs.append(os.getcwd())
    else:
        if script_dir == '.' or script_dir == '':
            script_dir = os.getcwd()
        head, tail = os.path.split(script_dir)
        if tail == "bin":
            # script_dir is /foo/bin;
            # check /foo/lib/scons*.
            prefs.append(head)
    head, tail = os.path.split(sys.prefix)
    if tail == "usr":
        # sys.prefix is /foo/usr;
        # check /foo/usr/lib/scons* first,
        # then /foo/usr/local/lib/scons*.
        prefs.append(sys.prefix)
        prefs.append(os.path.join(sys.prefix, "local"))
    elif tail == "local":
        h, t = os.path.split(head)
        if t == "usr":
            # sys.prefix is /foo/usr/local;
            # check /foo/usr/local/lib/scons* first,
            # then /foo/usr/lib/scons*.
            prefs.append(sys.prefix)
            prefs.append(head)
        else:
            # sys.prefix is /foo/local;
            # check only /foo/local/lib/scons*.
            prefs.append(sys.prefix)
    else:
        # sys.prefix is /foo (ends in neither /usr or /local);
        # check only /foo/lib/scons*.
        prefs.append(sys.prefix)
    # Each candidate prefix is searched in lib/ and in the versioned
    # site-packages directory.
    temp = [os.path.join(x, 'lib') for x in prefs]
    temp.extend([os.path.join(x,
                              'lib',
                              'python' + sys.version[:3],
                              'site-packages') for x in prefs])
    prefs = temp
# Add the parent directory of the current python's library to the
# preferences. On SuSE-91/AMD64, for example, this is /usr/lib64,
# not /usr/lib.
try:
    libpath = os.__file__
except AttributeError:
    pass
else:
    # Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
    libpath, tail = os.path.split(libpath)
    # Split /usr/libfoo/python* to /usr/libfoo
    libpath, tail = os.path.split(libpath)
    # Check /usr/libfoo/scons*.
    prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'.
libs.extend([os.path.join(x, scons_version) for x in prefs])
libs.extend([os.path.join(x, 'scons') for x in prefs])
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
import SCons.compat # so pickle will import cPickle instead
import whichdb
import time
import pickle
import imp
import SCons.SConsign
def my_whichdb(filename):
    """Like whichdb.whichdb, but also recognizes SCons .dblite files."""
    if filename[-7:] == ".dblite":
        return "SCons.dblite"
    # A bare name may still refer to a .dblite file on disk.
    try:
        probe = open(filename + ".dblite", "rb")
    except IOError:
        pass
    else:
        probe.close()
        return "SCons.dblite"
    return _orig_whichdb(filename)
# Monkey-patch the stdlib whichdb so SCons .dblite files are detected;
# keep the original for the non-.dblite fallback in my_whichdb().
_orig_whichdb = whichdb.whichdb
whichdb.whichdb = my_whichdb
def my_import(mname):
    """Import and return module *mname* via imp, resolving dotted names."""
    if '.' not in mname:
        fp, pathname, description = imp.find_module(mname)
    else:
        dot = mname.rfind('.')
        # Load the parent package first so its __path__ can be searched.
        parent = my_import(mname[:dot])
        fp, pathname, description = imp.find_module(mname[dot+1:],
                                                    parent.__path__)
    return imp.load_module(mname, fp, pathname, description)
class Flagger(object):
    """Dict-like flag store whose unknown keys report a default.

    The default starts at 1 ("print every field") and drops to 0 as
    soon as any flag is set explicitly.
    """
    default_value = 1
    def __setitem__(self, item, value):
        self.__dict__[item] = value
        # Any explicit setting turns off the print-everything default.
        self.default_value = 0
    def __getitem__(self, item):
        return self.__dict__.get(item, self.default_value)
# Global option state, filled in by the command-line parsing below.
Do_Call = None            # callable applied to each FILE argument (-f)
Print_Directories = []    # -d/--dir: restrict output to these dirs
Print_Entries = []        # -e/--entry: restrict output to these entries
Print_Flags = Flagger()   # which fields to print
Verbose = 0               # -v: label each field
Readable = 0              # -r: human-readable timestamps
def default_mapper(entry, name):
    """Return the string form of entry.<name>, or "None" when missing.

    Uses getattr instead of the original eval()+bare-except, which was
    slower and unsafe for arbitrary field names.
    """
    return str(getattr(entry, name, None))
def map_action(entry, name):
    """Format the build action as "<bactsig> [<bact>]", or None when the
    entry carries no action information."""
    if not hasattr(entry, 'bact') or not hasattr(entry, 'bactsig'):
        return None
    return '%s [%s]' % (entry.bactsig, entry.bact)
def map_timestamp(entry, name):
    """Format the entry's timestamp, honoring the global Readable flag."""
    timestamp = getattr(entry, 'timestamp', None)
    if Readable and timestamp:
        # Quoted ctime form for human consumption.
        return "'" + time.ctime(timestamp) + "'"
    return str(timestamp)
def map_bkids(entry, name):
    """Format the build children (sources + depends + implicit), or None
    when the entry has no build info or no children at all."""
    try:
        kids = entry.bsources + entry.bdepends + entry.bimplicit
        sigs = entry.bsourcesigs + entry.bdependsigs + entry.bimplicitsigs
    except AttributeError:
        return None
    lines = []
    for i, kid in enumerate(kids):
        lines.append(nodeinfo_string(kid, sigs[i], " "))
    if not lines:
        return None
    return "\n ".join(lines)
# Dispatch table: field name -> formatter (default_mapper otherwise).
map_field = {
    'action'    : map_action,
    'timestamp' : map_timestamp,
    'bkids'     : map_bkids,
}
# Field-name aliases: "implicit" is printed via the bkids formatter.
map_name = {
    'implicit'  : 'bkids',
}
def field(name, entry, verbose=Verbose):
    """Return the formatted value of field *name* for *entry*.

    Returns None when printing of the field is disabled by Print_Flags
    or the mapper has nothing to report.
    """
    if not Print_Flags[name]:
        return None
    fieldname = map_name.get(name, name)
    mapper = map_field.get(fieldname, default_mapper)
    val = mapper(entry, name)
    # Guard against None: the original concatenation raised TypeError
    # when a verbose field had no value to report.
    if verbose and val is not None:
        val = name + ": " + val
    return val
def nodeinfo_raw(name, ninfo, prefix=""):
    """Format ninfo's attribute dict as a literal with deterministic key
    order (plain str() on a dict would not guarantee that)."""
    attrs = ninfo.__dict__
    try:
        keys = ninfo.field_list + ['_version_id']
    except AttributeError:
        keys = sorted(attrs.keys())
    pairs = ['%s: %s' % (repr(k), repr(attrs.get(k))) for k in keys]
    if '\n' in name:
        # Multi-line names are shown repr-quoted so output stays one line.
        name = repr(name)
    return name + ': {' + ', '.join(pairs) + '}'
def nodeinfo_cooked(name, ninfo, prefix=""):
    """Format ninfo through the field() formatters: one line normally,
    one indented line per field in Verbose mode."""
    try:
        field_list = ninfo.field_list
    except AttributeError:
        field_list = []
    if '\n' in name:
        name = repr(name)
    formatted = [field(x, ninfo, Verbose) for x in field_list]
    outlist = [name+':'] + [item for item in formatted if item]
    if Verbose:
        sep = '\n ' + prefix
    else:
        sep = ' '
    return sep.join(outlist)
# Default formatter; the --raw option swaps in nodeinfo_raw instead.
nodeinfo_string = nodeinfo_cooked
def printfield(name, entry, prefix=""):
    # Print the "implicit" (build children) and "action" fields of a
    # build-info entry, with labels when Verbose is set.
    outlist = field("implicit", entry, 0)
    if outlist:
        if Verbose:
            print " implicit:"
        print " " + outlist
    outact = field("action", entry, 0)
    if outact:
        if Verbose:
            print " action: " + outact
        else:
            print " " + outact
def printentries(entries, location):
    # Print either the explicitly requested entries (-e/--entry) or all
    # entries, sorted by name for deterministic output.
    if Print_Entries:
        for name in Print_Entries:
            try:
                entry = entries[name]
            except KeyError:
                sys.stderr.write("sconsign: no entry `%s' in `%s'\n" % (name, location))
            else:
                try:
                    ninfo = entry.ninfo
                except AttributeError:
                    # No node info attached: just print the name.
                    print name + ":"
                else:
                    print nodeinfo_string(name, entry.ninfo)
                printfield(name, entry.binfo)
    else:
        for name in sorted(entries.keys()):
            entry = entries[name]
            try:
                ninfo = entry.ninfo
            except AttributeError:
                print name + ":"
            else:
                print nodeinfo_string(name, entry.ninfo)
            printfield(name, entry.binfo)
class Do_SConsignDB(object):
    """Callable that dumps one whole-build sconsign database file
    (e.g. .sconsign.dblite) via the given dbm-style module."""
    def __init__(self, dbm_name, dbm):
        self.dbm_name = dbm_name
        self.dbm = dbm
    def __call__(self, fname):
        # The *dbm modules stick their own file suffixes on the names
        # that are passed in. This is causes us to jump through some
        # hoops here to be able to allow the user
        try:
            # Try opening the specified file name. Example:
            # SPECIFIED OPENED BY self.dbm.open()
            # --------- -------------------------
            # .sconsign => .sconsign.dblite
            # .sconsign.dblite => .sconsign.dblite.dblite
            db = self.dbm.open(fname, "r")
        except (IOError, OSError), e:
            print_e = e
            try:
                # That didn't work, so try opening the base name,
                # so that if the actually passed in 'sconsign.dblite'
                # (for example), the dbm module will put the suffix back
                # on for us and open it anyway.
                db = self.dbm.open(os.path.splitext(fname)[0], "r")
            except (IOError, OSError):
                # That didn't work either. See if the file name
                # they specified just exists (independent of the dbm
                # suffix-mangling).
                try:
                    open(fname, "r")
                except (IOError, OSError), e:
                    # Nope, that file doesn't even exist, so report that
                    # fact back.
                    print_e = e
                sys.stderr.write("sconsign: %s\n" % (print_e))
                return
        except KeyboardInterrupt:
            raise
        except pickle.UnpicklingError:
            sys.stderr.write("sconsign: ignoring invalid `%s' file `%s'\n" % (self.dbm_name, fname))
            return
        except Exception, e:
            sys.stderr.write("sconsign: ignoring invalid `%s' file `%s': %s\n" % (self.dbm_name, fname, e))
            return
        if Print_Directories:
            for dir in Print_Directories:
                try:
                    val = db[dir]
                except KeyError:
                    # NOTE(review): this message uses the module-level
                    # args[0], not fname -- looks like a latent bug; confirm.
                    sys.stderr.write("sconsign: no dir `%s' in `%s'\n" % (dir, args[0]))
                else:
                    self.printentries(dir, val)
        else:
            for dir in sorted(db.keys()):
                self.printentries(dir, db[dir])
    def printentries(self, dir, val):
        # Each db value is a pickled {name: entry} dict for one directory.
        print '=== ' + dir + ':'
        printentries(pickle.loads(val), dir)
def Do_SConsignDir(name):
    # Dump one old-style per-directory .sconsign file.
    try:
        fp = open(name, 'rb')
    except (IOError, OSError), e:
        sys.stderr.write("sconsign: %s\n" % (e))
        return
    try:
        sconsign = SCons.SConsign.Dir(fp)
    except KeyboardInterrupt:
        raise
    except pickle.UnpicklingError:
        sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s'\n" % (name))
        return
    except Exception, e:
        sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s': %s\n" % (name, e))
        return
    # NOTE(review): args[0] is the module-level argument list, not *name*;
    # the location shown may be wrong for the 2nd+ file -- confirm.
    printentries(sconsign.entries, args[0])
##############################################################################
import getopt

helpstr = """\
Usage: sconsign [OPTIONS] FILE [...]
Options:
-a, --act, --action Print build action information.
-c, --csig Print content signature information.
-d DIR, --dir=DIR Print only info about DIR.
-e ENTRY, --entry=ENTRY Print only info about ENTRY.
-f FORMAT, --format=FORMAT FILE is in the specified FORMAT.
-h, --help Print this message and exit.
-i, --implicit Print implicit dependency information.
-r, --readable Print timestamps in human-readable form.
--raw Print raw Python object representations.
-s, --size Print file sizes.
-t, --timestamp Print timestamp information.
-v, --verbose Verbose, describe each field.
"""

opts, args = getopt.getopt(sys.argv[1:], "acd:e:f:hirstv",
                           ['act', 'action',
                            'csig', 'dir=', 'entry=',
                            'format=', 'help', 'implicit',
                            'raw', 'readable',
                            'size', 'timestamp', 'verbose'])

# Fill the module-level option globals from the parsed options.
for o, a in opts:
    if o in ('-a', '--act', '--action'):
        Print_Flags['action'] = 1
    elif o in ('-c', '--csig'):
        Print_Flags['csig'] = 1
    elif o in ('-d', '--dir'):
        Print_Directories.append(a)
    elif o in ('-e', '--entry'):
        Print_Entries.append(a)
    elif o in ('-f', '--format'):
        # Map the format name to the module that reads it; None means
        # the old per-directory .sconsign format.
        Module_Map = {'dblite' : 'SCons.dblite',
                      'sconsign' : None}
        dbm_name = Module_Map.get(a, a)
        if dbm_name:
            try:
                dbm = my_import(dbm_name)
            except:
                sys.stderr.write("sconsign: illegal file format `%s'\n" % a)
                print helpstr
                sys.exit(2)
            Do_Call = Do_SConsignDB(a, dbm)
        else:
            Do_Call = Do_SConsignDir
    elif o in ('-h', '--help'):
        print helpstr
        sys.exit(0)
    elif o in ('-i', '--implicit'):
        Print_Flags['implicit'] = 1
    elif o in ('--raw',):
        # Switch the formatter used for node info.
        nodeinfo_string = nodeinfo_raw
    elif o in ('-r', '--readable'):
        Readable = 1
    elif o in ('-s', '--size'):
        Print_Flags['size'] = 1
    elif o in ('-t', '--timestamp'):
        Print_Flags['timestamp'] = 1
    elif o in ('-v', '--verbose'):
        Verbose = 1

if Do_Call:
    for a in args:
        Do_Call(a)
else:
    # No explicit format: sniff each file with whichdb (patched above to
    # recognize .dblite) and dispatch to the matching reader.
    for a in args:
        dbm_name = whichdb.whichdb(a)
        if dbm_name:
            Map_Module = {'SCons.dblite' : 'dblite'}
            dbm = my_import(dbm_name)
            Do_SConsignDB(Map_Module.get(dbm_name, dbm_name), dbm)(a)
        else:
            Do_SConsignDir(a)

sys.exit(0)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#!/usr/bin/env python
'''
mm extern photivo.py
Passes an image to Photivo
Author:
Michael Munzert (mike photivo org)
Bernd Schoeler (brother.john photivo org)
Version:
2011.08.28 Brother John: Adjust Photivo cli
2011.01.29 Brother John: Ask user for photivo.exe and store in gimprc
2011.01.27 Brother John: Fixed failing execution of Photivo on Windows.
2011.01.02 mike: Initial version.
modelled after the trace plugin (lloyd konneker, lkk, bootch at nc.rr.com)
License:
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The GNU Public License is available at
http://www.gnu.org/copyleft/gpl.html
'''
from gimpfu import *
from platform import system
import os
import subprocess
import Tkinter, tkFileDialog
def plugin_main(image, drawable, visible):
    """Export the image (or its visible composite) to a temporary TIFF
    and hand that file to Photivo with --load-and-delete."""
    # Copy so the save operations doesn't affect the original
    tempimage = pdb.gimp_image_duplicate(image)
    if not tempimage:
        raise RuntimeError
    # Use temp file names from gimp, it reflects the user's choices in gimp.rc
    tempfilename = pdb.gimp_temp_name("tif")
    if visible == 0:
        # Save in temporary. Note: empty user entered file name
        tempdrawable = pdb.gimp_image_get_active_drawable(tempimage)
    else:
        # Get the current visible
        tempdrawable = pdb.gimp_layer_new_from_visible(image, tempimage, "visible")
    # !!! Note no run-mode first parameter, and user entered filename is empty string
    pdb.gimp_progress_set_text ("Saving a copy")
    pdb.gimp_file_save(tempimage, tempdrawable, tempfilename, "")
    # cleanup
    gimp.delete(tempimage) # delete the temporary image
    # Platform dependent full command string for Photivo.
    if system() == "Linux":
        # We can assume Photivo can be called with a simple photivo.
        command = 'photivo --load-and-delete "%s"' % (tempfilename)
    elif system() == "Windows":
        # There is no way to call Photivo without knowing exactly where it is installed.
        # So we ask the user for the path to photivo.exe and store it in the user's gimprc.
        cmdWindows = ""
        try:
            cmdWindows = pdb.gimp_gimprc_query("photivo-executable")
        except RuntimeError: # Catch ExecutionError when the key is not found in gimprc
            pass
        if not os.path.exists(cmdWindows):
            root = Tkinter.Tk()
            root.withdraw() # Hide the Tkinter main window so only the file dialog shows
            cmdWindows = tkFileDialog.askopenfilename(
                parent = None,
                title = "Where is photivo.exe located?",
                filetypes = [('photivo.exe','photivo.exe')],
                initialdir = "C:\\"
            )
            # Remember the chosen path for the next invocation.
            dummy = pdb.gimp_gimprc_set("photivo-executable", cmdWindows)
        command = '"%s" --load-and-delete "%s"' % (cmdWindows, tempfilename)
    # Invoke Photivo.
    pdb.gimp_progress_set_text(command)
    pdb.gimp_progress_pulse()
    if system() == "Windows":
        child = subprocess.Popen(command)
    elif system() == "Linux":
        # shell=True so the plain "photivo ..." command line is parsed.
        child = subprocess.Popen(command, shell = True)
# Register the plugin with GIMP: menu location, accepted image types
# ("*") and the single radio parameter (export the visible composite
# vs. only the current layer).
register(
    "python_fu_mm_extern_photivo",
    "Pass the image to Photivo.",
    "Pass the image to Photivo.",
    "Michael Munzert (mike photivo org)",
    "Copyright 2011 Michael Munzert",
    "2011",
    "<Image>/Filters/MM-Filters/_Export to Photivo ...",
    "*",
    [ (PF_RADIO, "visible", "Layer:", 1, (("new from visible", 1),("current layer",0)))
    ],
    [],
    plugin_main,
)

main()
| Python |
#!/usr/bin/env python
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Sergey Salnikov <salsergey@gmail.com>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
#
# This script generates CMakeLists.txt file. The script extracts all
# sources, headers and UIs from the photivoProject/photivoProject.pro
# file and adds them to CMakeLists.txt.in.
#
################################################################################
import sys
import os.path
import re
# Function to find if the source should be added.
def test_source(filename):
if filename.endswith('cpp') and not re.match('.*qtlockedfile.*', filename):
return True
else:
return False
# Function to find if the header file should be MOCed.
def test_header(filename):
file = open(filename)
for line in file:
if re.match('.*Q_OBJECT.*', line):
return True
return False
# Function that extracts the path to a file.
# The returned value means if the file list continues.
def match_to_path(files, line, test_function=None):
    """Parse one .pro list line; append its path to files if it qualifies.

    Returns True while the qmake list continues (line ends in a
    backslash continuation), False on the last entry.
    """
    if line.endswith('\\'):
        result = True
    else:
        result = False
    # Skip qmake comment lines entirely.
    if not re.match('^#', line):
        # Drop a trailing backslash continuation marker, then the
        # surrounding whitespace.
        line = re.split('\\\$', line)[0].strip()
        # Strip a leading '../' component so paths are relative to the
        # project root rather than the photivoProject subdirectory.
        if re.match('.*\.\./', line):
            line = re.split('\.\./', line)[1]
        if test_function == None or test_function(line):
            files.append(line)
    return result
# set the working directory to that containing this script
os.chdir(os.path.dirname(sys.argv[0]))

# Both the CMake template and the qmake project file must exist.
if not os.path.exists('CMakeLists.txt.in'):
    print 'File CMakeLists.txt.in doesn\'t exist.'
    exit(1)
if not os.path.exists('photivoProject/photivoProject.pro'):
    print 'File photivoProject/photivoProject.pro doesn\'t exist.'
    exit(1)

cmake_in = open('CMakeLists.txt.in', 'r')
qmake_pro = open('photivoProject/photivoProject.pro', 'r')
cmake_out = open('CMakeLists.txt', 'w')

sources = []        # .cpp files collected from SOURCES
headers = []        # headers containing Q_OBJECT, from HEADERS
uis = []            # Designer forms, from FORMS
skip = False        # True while inside a win32-only block
copy_src = False    # True while inside the SOURCES list
copy_hdr = False    # True while inside the HEADERS list
copy_ui = False     # True while inside the FORMS list

# Pass 1: walk the qmake project file and collect the file lists.
for line in qmake_pro:
    line = line.strip()
    # these lines correspond to win32 only and we skip them
    if re.match('win32', line):
        skip = True
    # the end of the win32 section
    if re.match('}', line):
        skip = False
    if skip:
        continue
    # sources section found
    if re.match('SOURCES', line):
        copy_src = True
    if copy_src:
        copy_src = match_to_path(sources, line, test_source)
        continue
    # headers section found
    if re.match('HEADERS', line):
        copy_hdr = True
    if copy_hdr:
        copy_hdr = match_to_path(headers, line, test_header)
        continue
    # forms section found
    if re.match('FORMS', line):
        copy_ui = True
    if copy_ui:
        copy_ui = match_to_path(uis, line)
        continue

# Pass 2: copy the template, splicing the collected lists in after the
# matching set(...) markers.
for line in cmake_in:
    cmake_out.write(line)
    # sources section found
    if re.match('^set\( photivo_SRCS', line):
        cmake_out.write(' ' + '\n '.join(sources))
    # headers section found
    if re.match('^set\( photivo_MOC_HDRS', line):
        cmake_out.write(' ' + '\n '.join(headers))
    # forms section found
    if re.match('^set\( photivo_UI_HDRS', line):
        cmake_out.write(' ' + '\n '.join(uis))

cmake_in.close()
qmake_pro.close()
cmake_out.close()
| Python |
#!/usr/bin/env python
#
# scons-time - run SCons timings and collect statistics
#
# A script for running a configuration through SCons with a standard
# set of invocations to collect timing and memory statistics and to
# capture the results in a consistent set of output files for display
# and analysis.
#
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
from __future__ import nested_scopes
__revision__ = "src/script/scons-time.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import getopt
import glob
import os
import re
import shutil
import sys
import tempfile
import time
try:
    sorted
except NameError:
    # Pre-2.4 Python has no sorted() function.
    #
    # The pre-2.4 Python list.sort() method does not support
    # list.sort(key=) nor list.sort(reverse=) keyword arguments, so
    # we must implement the functionality of those keyword arguments
    # by hand instead of passing them to list.sort().
    def sorted(iterable, cmp=None, key=None, reverse=False):
        """Minimal backport of the builtin sorted() for Python < 2.4."""
        if key is not None:
            # Decorate-sort-undecorate so key() runs once per element.
            result = [(key(x), x) for x in iterable]
        else:
            result = iterable[:]
        if cmp is None:
            # Pre-2.3 Python does not support list.sort(None).
            result.sort()
        else:
            result.sort(cmp)
        if key is not None:
            result = [t1 for t0,t1 in result]
        if reverse:
            result.reverse()
        return result
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is not None:
    # We can't apply the 'callable' fixer until the floor is 2.6, but the
    # '-3' option to Python 2.6 and 2.7 generates almost ten thousand
    # warnings.  This hack allows us to run regression tests with the '-3'
    # option by replacing the callable() built-in function with a hack
    # that performs the same function but doesn't generate the warning.
    # Note that this hack is ONLY intended to be used for regression
    # testing, and should NEVER be used for real runs.
    from types import ClassType
    def callable(obj):
        # Same truth table as the builtin: instances with __call__,
        # old-style classes, and new-style classes are callable.
        if hasattr(obj, '__call__'): return True
        if isinstance(obj, (ClassType, type)): return True
        return False
def make_temp_file(**kw):
    """Return a fresh temporary file name, absolute where possible.

    NOTE(review): tempfile.mktemp() only reserves a name, not the file,
    so this is inherently race-prone; kept for old-Python compatibility.
    """
    try:
        result = tempfile.mktemp(**kw)
        try:
            result = os.path.realpath(result)
        except AttributeError:
            # Python 2.1 has no os.path.realpath() method.
            pass
    except TypeError:
        # Very old tempfile.mktemp() accepts no keyword arguments;
        # emulate 'prefix' by temporarily swapping tempfile.template.
        try:
            save_template = tempfile.template
            prefix = kw['prefix']
            del kw['prefix']
            tempfile.template = prefix
            result = tempfile.mktemp(**kw)
        finally:
            tempfile.template = save_template
    return result
def HACK_for_exec(cmd, *args):
    '''
    For some reason, Python won't allow an exec() within a function
    that also declares an internal function (including lambda functions).
    This function is a hack that calls exec() in a function with no
    internal functions.

    args may supply one (globals) or two (globals, locals) namespaces.
    '''
    if not args: exec(cmd)
    elif len(args) == 1: exec cmd in args[0]
    else: exec cmd in args[0], args[1]
class Plotter(object):
    """Shared axis-scaling arithmetic for plot generators."""

    def increment_size(self, largest):
        """
        Return the size of each horizontal increment line for a specified
        maximum value.  This returns a value that will provide somewhere
        between 5 and 9 horizontal lines on the graph, on some set of
        boundaries that are multiples of 10/100/1000/etc.
        """
        step = largest // 5
        if not step:
            # Degenerate range: one increment spans the whole graph.
            return largest
        scale = 1
        while step >= 10:
            step //= 10
            scale *= 10
        return step * scale

    def max_graph_value(self, largest):
        """Round largest up to the next whole increment boundary."""
        # Round up to next integer.
        ceiling = int(largest) + 1
        step = self.increment_size(ceiling)
        return ((ceiling + step - 1) // step) * step
class Line(object):
    """One plottable data series plus its gnuplot presentation details."""
    def __init__(self, points, type, title, label, comment, fmt="%s %s"):
        self.points = points    # list of (x, y) pairs; y may be None for gaps
        self.type = type        # gnuplot line-type number
        self.title = title      # legend entry, or None for 'notitle'
        self.label = label      # optional on-plot text label
        self.comment = comment  # comment emitted above the data points
        self.fmt = fmt          # format used to print each (x, y) pair
    def print_label(self, inx, x, y):
        # Emit a 'set label' command for this series, if it has a label.
        if self.label:
            print 'set label %s "%s" at %s,%s right' % (inx, self.label, x, y)
    def plot_string(self):
        # Build this series' fragment of the gnuplot 'plot' command;
        # '-' means the data points follow inline on stdin.
        if self.title:
            title_string = 'title "%s"' % self.title
        else:
            title_string = 'notitle'
        return "'-' %s with lines lt %s" % (title_string, self.type)
    def print_points(self, fmt=None):
        # Emit the inline data block, terminated by gnuplot's 'e' marker.
        if fmt is None:
            fmt = self.fmt
        if self.comment:
            print '# %s' % self.comment
        for x, y in self.points:
            # If y is None, it usually represents some kind of break
            # in the line's index number.  We might want to represent
            # this some way rather than just drawing the line straight
            # between the two points on either side.
            if not y is None:
                print fmt % (x, y)
        print 'e'
    def get_x_values(self):
        return [ p[0] for p in self.points ]
    def get_y_values(self):
        return [ p[1] for p in self.points ]
class Gnuplotter(Plotter):
    """Collects Line series and emits a complete gnuplot script on stdout."""
    def __init__(self, title, key_location):
        self.lines = []                   # Line instances, in plot order
        self.title = title                # overall graph title (or None)
        self.key_location = key_location  # gnuplot 'set key' placement
    def line(self, points, type, title=None, label=None, comment=None, fmt='%s %s'):
        # Record a new data series; empty series are silently dropped.
        if points:
            line = Line(points, type, title, label, comment, fmt)
            self.lines.append(line)
    def plot_string(self, line):
        return line.plot_string()
    def vertical_bar(self, x, type, label, comment):
        # Draw a full-height bar at x, but only if x lies inside the
        # x range covered by the existing data.
        # NOTE(review): self.line() is called with (label, comment) in the
        # (title, label) positions -- looks like an upstream positional
        # slip; confirm before relying on bar labels.
        if self.get_min_x() <= x and x <= self.get_max_x():
            points = [(x, 0), (x, self.max_graph_value(self.get_max_y()))]
            self.line(points, type, label, comment)
    def get_all_x_values(self):
        # All non-None x values across every series.
        result = []
        for line in self.lines:
            result.extend(line.get_x_values())
        return [r for r in result if not r is None]
    def get_all_y_values(self):
        # All non-None y values across every series.
        result = []
        for line in self.lines:
            result.extend(line.get_y_values())
        return [r for r in result if not r is None]
    def get_min_x(self):
        # Lazily computed and cached as self.min_x; 0 when no data.
        try:
            return self.min_x
        except AttributeError:
            try:
                self.min_x = min(self.get_all_x_values())
            except ValueError:
                self.min_x = 0
            return self.min_x
    def get_max_x(self):
        # Lazily computed and cached as self.max_x; 0 when no data.
        try:
            return self.max_x
        except AttributeError:
            try:
                self.max_x = max(self.get_all_x_values())
            except ValueError:
                self.max_x = 0
            return self.max_x
    def get_min_y(self):
        # Lazily computed and cached as self.min_y; 0 when no data.
        try:
            return self.min_y
        except AttributeError:
            try:
                self.min_y = min(self.get_all_y_values())
            except ValueError:
                self.min_y = 0
            return self.min_y
    def get_max_y(self):
        # Lazily computed and cached as self.max_y; 0 when no data.
        try:
            return self.max_y
        except AttributeError:
            try:
                self.max_y = max(self.get_all_y_values())
            except ValueError:
                self.max_y = 0
            return self.max_y
    def draw(self):
        # Emit the whole script: settings, labels, the 'plot' command,
        # then each series' inline data. No output if there is no data.
        if not self.lines:
            return
        if self.title:
            print 'set title "%s"' % self.title
        print 'set key %s' % self.key_location
        # Stagger up to five label positions down from just above the
        # vertical midpoint so labels don't overlap.
        min_y = self.get_min_y()
        max_y = self.max_graph_value(self.get_max_y())
        incr = (max_y - min_y) / 10.0
        start = min_y + (max_y / 2.0) + (2.0 * incr)
        position = [ start - (i * incr) for i in range(5) ]
        inx = 1
        for line in self.lines:
            line.print_label(inx, line.points[0][0]-1,
                             position[(inx-1) % len(position)])
            inx += 1
        plot_strings = [ self.plot_string(l) for l in self.lines ]
        print 'plot ' + ', \\\n '.join(plot_strings)
        for line in self.lines:
            line.print_points()
def untar(fname):
    """Unpack the tar archive fname into the current directory."""
    import tarfile
    archive = tarfile.open(name=fname, mode='r')
    for member in archive:
        archive.extract(member)
    archive.close()
def unzip(fname):
    """Unpack the zip archive fname into the current directory.

    Fixes in this version: entries are written in binary mode (text
    mode corrupted binary content and breaks under Python 3), the
    bare 'except: pass' around makedirs() is replaced with an explicit
    existence check, and both the archive and each output file are
    closed instead of leaked.
    """
    import zipfile
    zf = zipfile.ZipFile(fname, 'r')
    for name in zf.namelist():
        dir = os.path.dirname(name)
        # Create intermediate directories only when actually needed.
        if dir and not os.path.isdir(dir):
            os.makedirs(dir)
        out = open(name, 'wb')
        out.write(zf.read(name))
        out.close()
    zf.close()
def read_tree(dir):
    """Read every regular file under dir (e.g. to warm the OS file cache).

    Each file is now closed explicitly; the original leaked the handle
    of every file it opened.
    """
    for dirpath, dirnames, filenames in os.walk(dir):
        for fn in filenames:
            fn = os.path.join(dirpath, fn)
            if os.path.isfile(fn):
                f = open(fn, 'rb')
                f.read()
                f.close()
def redirect_to_file(command, log):
    """Return a shell line that runs command with stdout+stderr sent to log."""
    template = '%s > %s 2>&1'
    return template % (command, log)
def tee_to_file(command, log):
    """Return a shell line that runs command, teeing all output into log."""
    template = '%s 2>&1 | tee %s'
    return template % (command, log)
class SConsTimer(object):
    # The class docstring doubles as the output of bare 'scons-time help'
    # (see do_help/doc_to_help), so its wording is preserved verbatim.
    """
Usage: scons-time SUBCOMMAND [ARGUMENTS]
Type "scons-time help SUBCOMMAND" for help on a specific subcommand.
Available subcommands:
func Extract test-run data for a function
help Provides help
mem Extract --debug=memory data from test runs
obj Extract --debug=count data from test runs
time Extract --debug=time data from test runs
run Runs a test configuration
"""

    name = 'scons-time'
    name_spaces = ' '*len(name)     # used to align continuation lines in messages

    def makedict(**kw):
        # Class-body helper (note: no self) used once, immediately below,
        # to build default_settings from keyword arguments.
        return kw
    # One attribute per configurable setting; __init__ copies these into
    # each instance so config files / options can override them.
    default_settings = makedict(
        aegis = 'aegis',
        aegis_project = None,
        chdir = None,
        config_file = None,
        initial_commands = [],
        key_location = 'bottom left',
        orig_cwd = os.getcwd(),
        outdir = None,
        prefix = '',
        python = '"%s"' % sys.executable,
        redirect = redirect_to_file,
        scons = None,
        scons_flags = '--debug=count --debug=memory --debug=time --debug=memoizer',
        scons_lib_dir = None,
        scons_wrapper = None,
        startup_targets = '--help',
        subdir = None,
        subversion_url = None,
        svn = 'svn',
        svn_co_flag = '-q',
        tar = 'tar',
        targets = '',
        targets0 = None,
        targets1 = None,
        targets2 = None,
        title = None,
        unzip = 'unzip',
        verbose = False,
        vertical_bars = [],
        # archive extension -> (python unpack function, shell fallback)
        unpack_map = {
            '.tar.gz' : (untar, '%(tar)s xzf %%s'),
            '.tgz' : (untar, '%(tar)s xzf %%s'),
            '.tar' : (untar, '%(tar)s xf %%s'),
            '.zip' : (unzip, '%(unzip)s %%s'),
        },
    )
    # Titles for the three standard timed invocations, by index.
    run_titles = [
        'Startup',
        'Full build',
        'Up-to-date build',
    ]
    run_commands = [
        '%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof0)s %(targets0)s',
        '%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof1)s %(targets1)s',
        '%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof2)s %(targets2)s',
    ]
    # The four --debug=memory measurement points, in log order.
    stages = [
        'pre-read',
        'post-read',
        'pre-build',
        'post-build',
    ]
    stage_strings = {
        'pre-read' : 'Memory before reading SConscript files:',
        'post-read' : 'Memory after reading SConscript files:',
        'pre-build' : 'Memory before building targets:',
        'post-build' : 'Memory after building targets:',
    }
    memory_string_all = 'Memory '
    default_stage = stages[-1]
    time_strings = {
        'total' : 'Total build time',
        'SConscripts' : 'Total SConscript file execution time',
        'SCons' : 'Total SCons execution time',
        'commands' : 'Total command execution time',
    }
    time_string_all = 'Total .* time'
    #
    def __init__(self):
        # Seed instance attributes from the class-level defaults so each
        # instance can be reconfigured (config file, options) independently.
        self.__dict__.update(self.default_settings)
    # Functions for displaying and executing commands.
    def subst(self, x, dictionary):
        """Interpolate dictionary into x via the % operator; non-strings
        pass through unchanged."""
        try:
            return x % dictionary
        except TypeError:
            # x isn't a string (it's probably a Python function),
            # so just return it.
            return x
    def subst_variables(self, command, dictionary):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.
        The command can be an (action, string) tuple.  In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string.  (It's probably a Python function to be executed.)

        Returns an (action, display-string, extra-args) triple.
        """
        try:
            # Duck-type test: only strings support '+ str'.
            command + ''
        except TypeError:
            action = command[0]
            string = command[1]
            args = command[2:]
        else:
            action = command
            string = action
            args = (())   # (()) is just an empty tuple: no extra arguments
        action = self.subst(action, dictionary)
        string = self.subst(string, dictionary)
        return (action, string, args)
    def _do_not_display(self, msg, *args):
        # No-op stand-in for display(), installed by the --quiet option.
        pass
    def display(self, msg, *args):
        """
        Displays the specified message.
        Each message is prepended with a standard prefix of our name
        plus the time.

        msg may be a callable (invoked with *args to produce the text)
        or a format string (combined with args via %).  A None result
        is silently dropped.
        """
        if callable(msg):
            msg = msg(*args)
        else:
            msg = msg % args
        if msg is None:
            return
        fmt = '%s[%s]: %s\n'
        sys.stdout.write(fmt % (self.name, time.strftime('%H:%M:%S'), msg))
    def _do_not_execute(self, action, *args):
        # No-op stand-in for execute(), installed by the -n/--no-exec option.
        pass
def execute(self, action, *args):
"""
Executes the specified action.
The action is called if it's a callable Python function, and
otherwise passed to os.system().
"""
if callable(action):
action(*args)
else:
os.system(action % args)
    def run_command_list(self, commands, dict):
        """
        Executes a list of commands, substituting values from the
        specified dictionary.
        """
        commands = [ self.subst_variables(c, dict) for c in commands ]
        for action, string, args in commands:
            self.display(string, *args)
            sys.stdout.flush()
            status = self.execute(action, *args)
            # NOTE(review): execute() as written returns None, so this
            # abort-on-failure check never fires; it only takes effect if
            # execute() is changed to return the command's exit status.
            if status:
                sys.exit(status)
def log_display(self, command, log):
command = self.subst(command, self.__dict__)
if log:
command = self.redirect(command, log)
return command
def log_execute(self, command, log):
command = self.subst(command, self.__dict__)
output = os.popen(command).read()
if self.verbose:
sys.stdout.write(output)
open(log, 'wb').write(output)
#
def archive_splitext(self, path):
"""
Splits an archive name into a filename base and extension.
This is like os.path.splitext() (which it calls) except that it
also looks for '.tar.gz' and treats it as an atomic extensions.
"""
if path.endswith('.tar.gz'):
return path[:-7], path[-7:]
else:
return os.path.splitext(path)
def args_to_files(self, args, tail=None):
"""
Takes a list of arguments, expands any glob patterns, and
returns the last "tail" files from the list.
"""
files = []
for a in args:
files.extend(sorted(glob.glob(a)))
if tail:
files = files[-tail:]
return files
    def ascii_table(self, files, columns,
                    line_function, file_function=lambda x: x,
                    *args, **kw):
        """Print a fixed-width table: a header row from columns, then one
        row per file from line_function(file, ...), with
        file_function(file) appended as the trailing column."""
        header_fmt = ' '.join(['%12s'] * len(columns))
        line_fmt = header_fmt + ' %s'
        print header_fmt % columns
        for file in files:
            t = line_function(file, *args, **kw)
            if t is None:
                # No data for this file: pad the row with blanks below.
                t = []
            diff = len(columns) - len(t)
            if diff > 0:
                t += [''] * diff
            t.append(file_function(file))
            print line_fmt % tuple(t)
def collect_results(self, files, function, *args, **kw):
results = {}
for file in files:
base = os.path.splitext(file)[0]
run, index = base.split('-')[-2:]
run = int(run)
index = int(index)
value = function(file, *args, **kw)
try:
r = results[index]
except KeyError:
r = []
results[index] = r
r.append((run, value))
return results
def doc_to_help(self, obj):
"""
Translates an object's __doc__ string into help text.
This strips a consistent number of spaces from each line in the
help text, essentially "outdenting" the text to the left-most
column.
"""
doc = obj.__doc__
if doc is None:
return ''
return self.outdent(doc)
def find_next_run_number(self, dir, prefix):
"""
Returns the next run number in a directory for the specified prefix.
Examines the contents the specified directory for files with the
specified prefix, extracts the run numbers from each file name,
and returns the next run number after the largest it finds.
"""
x = re.compile(re.escape(prefix) + '-([0-9]+).*')
matches = [x.match(e) for e in os.listdir(dir)]
matches = [_f for _f in matches if _f]
if not matches:
return 0
run_numbers = [int(m.group(1)) for m in matches]
return int(max(run_numbers)) + 1
    def gnuplot_results(self, results, fmt='%s %.3f'):
        """
        Prints out a set of results in Gnuplot format.

        results is the {index: [(run, value), ...]} mapping produced by
        collect_results(); one Line per index, titled from run_titles.
        """
        gp = Gnuplotter(self.title, self.key_location)
        for i in sorted(results.keys()):
            try:
                t = self.run_titles[i]
            except IndexError:
                t = '??? %s ???' % i
            results[i].sort()
            gp.line(results[i], i+1, t, None, t, fmt=fmt)
        for bar_tuple in self.vertical_bars:
            try:
                x, type, label, comment = bar_tuple
            except ValueError:
                # 3-tuples reuse the label as the comment.
                x, type, label = bar_tuple
                comment = label
            gp.vertical_bar(x, type, label, comment)
        gp.draw()
def logfile_name(self, invocation):
"""
Returns the absolute path of a log file for the specificed
invocation number.
"""
name = self.prefix_run + '-%d.log' % invocation
return os.path.join(self.outdir, name)
def outdent(self, s):
"""
Strip as many spaces from each line as are found at the beginning
of the first line in the list.
"""
lines = s.split('\n')
if lines[0] == '':
lines = lines[1:]
spaces = re.match(' *', lines[0]).group(0)
def strip_initial_spaces(l, s=spaces):
if l.startswith(spaces):
l = l[len(spaces):]
return l
return '\n'.join([ strip_initial_spaces(l) for l in lines ]) + '\n'
def profile_name(self, invocation):
"""
Returns the absolute path of a profile file for the specified
invocation number.
"""
name = self.prefix_run + '-%d.prof' % invocation
return os.path.join(self.outdir, name)
    def set_env(self, key, value):
        """Set an environment variable (available as a config-file action)."""
        os.environ[key] = value
    #
    def get_debug_times(self, file, time_string=None):
        """
        Fetch times from the --debug=time strings in the specified file.

        With time_string=None, returns the last four matching totals as a
        list of floats; with a specific string, returns that single float.
        Returns None (with a message on stderr) when nothing matches.
        """
        if time_string is None:
            search_string = self.time_string_all
        else:
            search_string = time_string
        contents = open(file).read()
        if not contents:
            sys.stderr.write('file %s has no contents!\n' % repr(file))
            return None
        # Keep only the last four matches: one run logs four totals.
        result = re.findall(r'%s: ([\d\.]*)' % search_string, contents)[-4:]
        result = [ float(r) for r in result ]
        if not time_string is None:
            try:
                result = result[0]
            except IndexError:
                sys.stderr.write('file %s has no results!\n' % repr(file))
                return None
        return result
    def get_function_profile(self, file, function):
        """
        Returns the file, line number, function name, and cumulative time.

        Raises IndexError (via matches[0]) if the function does not
        appear in the profile.
        """
        try:
            import pstats
        except ImportError, e:
            sys.stderr.write('%s: func: %s\n' % (self.name, e))
            sys.stderr.write('%s  This version of Python is missing the profiler.\n' % self.name_spaces)
            sys.stderr.write('%s  Cannot use the "func" subcommand.\n' % self.name_spaces)
            sys.exit(1)
        statistics = pstats.Stats(file).stats
        # Stats keys are (filename, lineno, funcname) triples; the value
        # tuple's [3] element is the cumulative time.
        matches = [ e for e in statistics.items() if e[0][2] == function ]
        r = matches[0]
        return r[0][0], r[0][1], r[0][2], r[1][3]
    def get_function_time(self, file, function):
        """
        Returns just the cumulative time for the specified function.
        """
        # [3] is the cumulative-time element of get_function_profile().
        return self.get_function_profile(file, function)[3]
    def get_memory(self, file, memory_string=None):
        """
        Returns a list of integers of the amount of memory used.  The
        default behavior is to return all the stages.

        With a specific memory_string, a single int is returned instead
        of a list (the len(result) == 1 collapse below).
        """
        if memory_string is None:
            search_string = self.memory_string_all
        else:
            search_string = memory_string
        lines = open(file).readlines()
        lines = [ l for l in lines if l.startswith(search_string) ][-4:]
        # NOTE(review): this second [-4:] is redundant -- lines was
        # already truncated to its last four entries above.
        result = [ int(l.split()[-1]) for l in lines[-4:] ]
        if len(result) == 1:
            result = result[0]
        return result
    def get_object_counts(self, file, object_name, index=None):
        """
        Returns the counts of the specified object_name.

        Parses the --debug=count table row whose trailing column is
        object_name; the first four fields are the per-stage counts.
        index, when given, selects a single stage's count.
        """
        object_string = ' ' + object_name + '\n'
        lines = open(file).readlines()
        line = [ l for l in lines if l.endswith(object_string) ][0]
        result = [ int(field) for field in line.split()[:4] ]
        if index is not None:
            result = result[index]
        return result
    #
    command_alias = {}  # optional alternate-name -> subcommand mapping
    def execute_subcommand(self, argv):
        """
        Executes the do_*() function for the specified subcommand (argv[0]).

        Unknown subcommands are routed to default(); a TypeError from the
        handler (usually bad arguments) prints a traceback plus a pointer
        to the subcommand's help.
        """
        if not argv:
            return
        cmdName = self.command_alias.get(argv[0], argv[0])
        try:
            func = getattr(self, 'do_' + cmdName)
        except AttributeError:
            return self.default(argv)
        try:
            return func(argv)
        except TypeError, e:
            sys.stderr.write("%s %s: %s\n" % (self.name, cmdName, e))
            import traceback
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("Try '%s help %s'\n" % (self.name, cmdName))
    def default(self, argv):
        """
        The default behavior for an unknown subcommand.  Prints an
        error message and exits.

        Exits with status 1; never returns.
        """
        sys.stderr.write('%s: Unknown subcommand "%s".\n' % (self.name, argv[0]))
        sys.stderr.write('Type "%s help" for usage.\n' % self.name)
        sys.exit(1)
    #
    def do_help(self, argv):
        """
        """
        # Implements the 'help' subcommand.  For each argument, prefer a
        # dedicated help_<arg>() method, falling back to the do_<arg>()
        # docstring; with no arguments, print the class-level usage text.
        # (The empty docstring above is itself what 'help help' prints,
        # so it is deliberately left as-is.)
        if argv[1:]:
            for arg in argv[1:]:
                try:
                    func = getattr(self, 'do_' + arg)
                except AttributeError:
                    sys.stderr.write('%s: No help for "%s"\n' % (self.name, arg))
                else:
                    try:
                        help = getattr(self, 'help_' + arg)
                    except AttributeError:
                        sys.stdout.write(self.doc_to_help(func))
                        sys.stdout.flush()
                    else:
                        help()
        else:
            doc = self.doc_to_help(self.__class__)
            if doc:
                sys.stdout.write(doc)
                sys.stdout.flush()
            return None
    #
    def help_func(self):
        # Usage text for 'scons-time help func'; the string is program
        # output, so its wording is preserved verbatim.
        help = """\
Usage: scons-time func [OPTIONS] FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
--func=NAME, --function=NAME Report time for function NAME
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--title=TITLE Specify the output plot TITLE
"""
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_func(self, argv):
        """Report cumulative times for a function from .prof files
        ('func' subcommand), as an ascii listing or gnuplot data."""
        format = 'ascii'
        function_name = '_main'
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'func=',
            'function=',
            'help',
            'prefix=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('--func', '--function'):
                function_name = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'func'])
                sys.exit(0)
            elif o in ('--max',):
                # NOTE(review): '--max' is not declared in long_opts, so
                # getopt can never produce it -- this branch is dead code.
                max_time = int(a)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        if self.config_file:
            # Python 2 exec statement: run the config file in our namespace.
            exec open(self.config_file, 'rU').read() in self.__dict__
        if self.chdir:
            os.chdir(self.chdir)
        if not args:
            # No explicit files: fall back to prefix-matched .prof files.
            pattern = '%s*.prof' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: func: No arguments specified.\n' % self.name)
                sys.stderr.write('%s  No %s*.prof files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s  Type "%s help func" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            for file in args:
                try:
                    f, line, func, time = \
                        self.get_function_profile(file, function_name)
                except ValueError, e:
                    sys.stderr.write("%s: func: %s: %s\n" %
                                     (self.name, file, e))
                else:
                    # Print paths relative to the current directory.
                    if f.startswith(cwd_):
                        f = f[len(cwd_):]
                    print "%.3f %s:%d(%s)" % (time, f, line, func)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_function_time,
                                           function_name)
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: func: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
    #
    def help_mem(self):
        # Usage text for 'scons-time help mem'; the string is program
        # output, so its wording is preserved verbatim.
        help = """\
Usage: scons-time mem [OPTIONS] FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
--stage=STAGE Plot memory at the specified stage:
pre-read, post-read, pre-build,
post-build (default: post-build)
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--title=TITLE Specify the output plot TITLE
"""
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_mem(self, argv):
        """Report --debug=memory figures from log files ('mem'
        subcommand), as an ascii table or gnuplot data for one stage."""
        format = 'ascii'
        logfile_path = lambda x: x
        stage = self.default_stage
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'stage=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'mem'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--stage',):
                if not a in self.stages:
                    sys.stderr.write('%s: mem: Unrecognized stage "%s".\n' % (self.name, a))
                    sys.exit(1)
                stage = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        if self.config_file:
            # Indirect exec wrapper (see HACK_for_exec for why).
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            # No explicit files: fall back to prefix-matched .log files.
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: mem: No arguments specified.\n' % self.name)
                sys.stderr.write('%s  No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s  Type "%s help mem" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            self.ascii_table(args, tuple(self.stages), self.get_memory, logfile_path)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_memory,
                                           self.stage_strings[stage])
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: mem: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
        return 0
    #
    def help_obj(self):
        # Usage text for 'scons-time help obj'; the string is program
        # output, so its wording is preserved verbatim.
        help = """\
Usage: scons-time obj [OPTIONS] OBJECT FILE [...]
-C DIR, --chdir=DIR Change to DIR before looking for files
-f FILE, --file=FILE Read configuration from specified FILE
--fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
-h, --help Print this help and exit
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
--stage=STAGE Plot memory at the specified stage:
pre-read, post-read, pre-build,
post-build (default: post-build)
-t NUMBER, --tail=NUMBER Only report the last NUMBER files
--title=TITLE Specify the output plot TITLE
"""
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_obj(self, argv):
        """Report --debug=count object counts for one object type ('obj'
        subcommand), as an ascii table or gnuplot data for one stage."""
        format = 'ascii'
        logfile_path = lambda x: x
        stage = self.default_stage
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'stage=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'obj'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--stage',):
                if not a in self.stages:
                    sys.stderr.write('%s: obj: Unrecognized stage "%s".\n' % (self.name, a))
                    sys.stderr.write('%s  Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
                    sys.exit(1)
                stage = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        # The first positional argument is the object (class) name.
        if not args:
            sys.stderr.write('%s: obj: Must specify an object name.\n' % self.name)
            sys.stderr.write('%s  Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
        object_name = args.pop(0)
        if self.config_file:
            # Indirect exec wrapper (see HACK_for_exec for why).
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            # No explicit files: fall back to prefix-matched .log files.
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: obj: No arguments specified.\n' % self.name)
                sys.stderr.write('%s  No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s  Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            self.ascii_table(args, tuple(self.stages), self.get_object_counts, logfile_path, object_name)
        elif format == 'gnuplot':
            # Map the stage name to its column index in the count table.
            stage_index = 0
            for s in self.stages:
                if stage == s:
                    break
                stage_index = stage_index + 1
            results = self.collect_results(args, self.get_object_counts,
                                           object_name, stage_index)
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: obj: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
        return 0
    #
    def help_run(self):
        # Usage text for 'scons-time help run'; the string is program
        # output, so its wording is preserved verbatim.
        help = """\
Usage: scons-time run [OPTIONS] [FILE ...]
--aegis=PROJECT Use SCons from the Aegis PROJECT
--chdir=DIR Name of unpacked directory for chdir
-f FILE, --file=FILE Read configuration from specified FILE
-h, --help Print this help and exit
-n, --no-exec No execute, just print command lines
--number=NUMBER Put output in files for run NUMBER
--outdir=OUTDIR Put output files in OUTDIR
-p STRING, --prefix=STRING Use STRING as log file/profile prefix
--python=PYTHON Time using the specified PYTHON
-q, --quiet Don't print command lines
--scons=SCONS Time using the specified SCONS
--svn=URL, --subversion=URL Use SCons from Subversion URL
-v, --verbose Display output of commands
"""
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_run(self, argv):
        """Run the standard timing configuration ('run' subcommand):
        parse options, optionally prepare an SCons checkout (svn/aegis),
        then perform one timed run per requested run number."""
        run_number_list = [None]
        short_opts = '?f:hnp:qs:v'
        long_opts = [
            'aegis=',
            'file=',
            'help',
            'no-exec',
            'number=',
            'outdir=',
            'prefix=',
            'python=',
            'quiet',
            'scons=',
            'svn=',
            'subdir=',
            'subversion=',
            'verbose',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('--aegis',):
                self.aegis_project = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'run'])
                sys.exit(0)
            elif o in ('-n', '--no-exec'):
                # Swap in the no-op executor: print, don't run.
                self.execute = self._do_not_execute
            elif o in ('--number',):
                run_number_list = self.split_run_numbers(a)
            elif o in ('--outdir',):
                self.outdir = a
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--python',):
                self.python = a
            elif o in ('-q', '--quiet'):
                # Swap in the no-op display: run, don't print.
                self.display = self._do_not_display
            elif o in ('-s', '--subdir'):
                self.subdir = a
            elif o in ('--scons',):
                self.scons = a
            elif o in ('--svn', '--subversion'):
                self.subversion_url = a
            elif o in ('-v', '--verbose'):
                self.redirect = tee_to_file
                self.verbose = True
                self.svn_co_flag = ''
        if not args and not self.config_file:
            sys.stderr.write('%s: run: No arguments or -f config file specified.\n' % self.name)
            sys.stderr.write('%s  Type "%s help run" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
        if self.config_file:
            # Python 2 exec statement: run the config file in our namespace.
            exec open(self.config_file, 'rU').read() in self.__dict__
        if args:
            self.archive_list = args
        # Derive default subdir/prefix from the first archive's base name.
        archive_file_name = os.path.split(self.archive_list[0])[1]
        if not self.subdir:
            self.subdir = self.archive_splitext(archive_file_name)[0]
        if not self.prefix:
            self.prefix = self.archive_splitext(archive_file_name)[0]
        prepare = None
        if self.subversion_url:
            prepare = self.prep_subversion_run
        elif self.aegis_project:
            prepare = self.prep_aegis_run
        for run_number in run_number_list:
            self.individual_run(run_number, self.archive_list, prepare)
def split_run_numbers(self, s):
result = []
for n in s.split(','):
try:
x, y = n.split('-')
except ValueError:
result.append(int(n))
else:
result.extend(list(range(int(x), int(y)+1)))
return result
def scons_path(self, dir):
    """Return the path of the scons.py script inside source tree *dir*."""
    script_rel_path = ('src', 'script', 'scons.py')
    return os.path.join(dir, *script_rel_path)
def scons_lib_dir_path(self, dir):
    """Return the engine (library) directory inside source tree *dir*."""
    engine_rel_path = ('src', 'engine')
    return os.path.join(dir, *engine_rel_path)
def prep_aegis_run(self, commands, removals):
    """Queue the commands that fetch SCons from an Aegis project.

    Checkout commands go into *commands*; the cleanup of the temporary
    checkout directory goes into *removals*.  Also points self.scons and
    self.scons_lib_dir at the fresh checkout.
    """
    tmpdir = make_temp_file(prefix = self.name + '-aegis-')
    self.aegis_tmpdir = tmpdir
    removals.append((shutil.rmtree, 'rm -rf %%s', tmpdir))
    self.aegis_parent_project = os.path.splitext(self.aegis_project)[0]
    self.scons = self.scons_path(tmpdir)
    self.scons_lib_dir = self.scons_lib_dir_path(tmpdir)
    commands.append('mkdir %(aegis_tmpdir)s')
    commands.append((lambda: os.chdir(self.aegis_tmpdir), 'cd %(aegis_tmpdir)s'))
    commands.append('%(aegis)s -cp -ind -p %(aegis_parent_project)s .')
    commands.append('%(aegis)s -cp -ind -p %(aegis_project)s -delta %(run_number)s .')
def prep_subversion_run(self, commands, removals):
    """Queue the commands that fetch SCons from a Subversion URL.

    Checkout commands go into *commands*; the cleanup of the temporary
    checkout directory goes into *removals*.  Also points self.scons and
    self.scons_lib_dir at the fresh checkout.
    """
    tmpdir = make_temp_file(prefix = self.name + '-svn-')
    self.svn_tmpdir = tmpdir
    removals.append((shutil.rmtree, 'rm -rf %%s', tmpdir))
    self.scons = self.scons_path(tmpdir)
    self.scons_lib_dir = self.scons_lib_dir_path(tmpdir)
    commands.append('mkdir %(svn_tmpdir)s')
    commands.append('%(svn)s co %(svn_co_flag)s -r %(run_number)s %(subversion_url)s %(svn_tmpdir)s')
def individual_run(self, run_number, archive_list, prepare=None):
    """
    Performs an individual run of the default SCons invocations.

    Builds a command list (strings to be interpolated against
    self.__dict__, or (callable, display, args...) tuples), then hands
    it to run_command_list().  *prepare*, when given, queues checkout
    commands (Subversion/Aegis) and cleanup actions first.
    """
    commands = []
    removals = []
    if prepare:
        prepare(commands, removals)
    # A prepare callback may redirect these attributes at a temporary
    # checkout; remember the configured values so they can be restored
    # at the end of this run.
    save_scons = self.scons
    save_scons_wrapper = self.scons_wrapper
    save_scons_lib_dir = self.scons_lib_dir
    # Resolve defaults relative to the original working directory.
    if self.outdir is None:
        self.outdir = self.orig_cwd
    elif not os.path.isabs(self.outdir):
        self.outdir = os.path.join(self.orig_cwd, self.outdir)
    if self.scons is None:
        self.scons = self.scons_path(self.orig_cwd)
    if self.scons_lib_dir is None:
        self.scons_lib_dir = self.scons_lib_dir_path(self.orig_cwd)
    if self.scons_wrapper is None:
        self.scons_wrapper = self.scons
    if not run_number:
        run_number = self.find_next_run_number(self.outdir, self.prefix)
    self.run_number = str(run_number)
    # Zero-padded run number keeps log/profile names sortable.
    self.prefix_run = self.prefix + '-%03d' % run_number
    if self.targets0 is None:
        self.targets0 = self.startup_targets
    if self.targets1 is None:
        self.targets1 = self.targets
    if self.targets2 is None:
        self.targets2 = self.targets
    self.tmpdir = make_temp_file(prefix = self.name + '-')
    commands.extend([
        'mkdir %(tmpdir)s',
        (os.chdir, 'cd %%s', self.tmpdir),
    ])
    # Copy or unpack every archive into the temporary directory.
    for archive in archive_list:
        if not os.path.isabs(archive):
            archive = os.path.join(self.orig_cwd, archive)
        if os.path.isdir(archive):
            dest = os.path.split(archive)[1]
            commands.append((shutil.copytree, 'cp -r %%s %%s', archive, dest))
        else:
            # Pick the unpack command by archive suffix; unknown
            # suffixes are just copied verbatim.
            suffix = self.archive_splitext(archive)[1]
            unpack_command = self.unpack_map.get(suffix)
            if not unpack_command:
                dest = os.path.split(archive)[1]
                commands.append((shutil.copyfile, 'cp %%s %%s', archive, dest))
            else:
                commands.append(unpack_command + (archive,))
    commands.extend([
        (os.chdir, 'cd %%s', self.subdir),
    ])
    commands.extend(self.initial_commands)
    commands.extend([
        # Read the whole tree once so timings are not skewed by a cold
        # file system cache.
        (lambda: read_tree('.'),
        'find * -type f | xargs cat > /dev/null'),
        (self.set_env, 'export %%s=%%s',
        'SCONS_LIB_DIR', self.scons_lib_dir),
        '%(python)s %(scons_wrapper)s --version',
    ])
    # One logged, profiled invocation per configured run command.
    index = 0
    for run_command in self.run_commands:
        setattr(self, 'prof%d' % index, self.profile_name(index))
        c = (
            self.log_execute,
            self.log_display,
            run_command,
            self.logfile_name(index),
        )
        commands.append(c)
        index = index + 1
    commands.extend([
        (os.chdir, 'cd %%s', self.orig_cwd),
    ])
    # Setting $PRESERVE keeps the temporary trees around for inspection.
    if not os.environ.get('PRESERVE'):
        commands.extend(removals)
        commands.append((shutil.rmtree, 'rm -rf %%s', self.tmpdir))
    self.run_command_list(commands, self.__dict__)
    # Restore the values possibly clobbered by prepare()/defaulting.
    self.scons = save_scons
    self.scons_lib_dir = save_scons_lib_dir
    self.scons_wrapper = save_scons_wrapper
#
def help_time(self):
    """Print the usage/help text for the "time" subcommand."""
    # Renamed from "help" so the builtin help() is not shadowed.
    help_text = """\
    Usage: scons-time time [OPTIONS] FILE [...]
      -C DIR, --chdir=DIR Change to DIR before looking for files
      -f FILE, --file=FILE Read configuration from specified FILE
      --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
      -h, --help Print this help and exit
      -p STRING, --prefix=STRING Use STRING as log file/profile prefix
      -t NUMBER, --tail=NUMBER Only report the last NUMBER files
      --which=TIMER Plot timings for TIMER: total,
      SConscripts, SCons, commands.
    """
    # outdent() strips the common leading whitespace before printing.
    sys.stdout.write(self.outdent(help_text))
    sys.stdout.flush()
def do_time(self, argv):
    """Handle the "time" subcommand: report the debug-time measurements
    collected in prefix*.log files, as an ascii table or gnuplot data.
    """
    format = 'ascii'
    # Identity mapping by default; replaced when --chdir relocates logs.
    logfile_path = lambda x: x
    tail = None
    which = 'total'
    short_opts = '?C:f:hp:t:'
    long_opts = [
        'chdir=',
        'file=',
        'fmt=',
        'format=',
        'help',
        'prefix=',
        'tail=',
        'title=',
        'which=',
    ]
    opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
    for o, a in opts:
        if o in ('-C', '--chdir'):
            self.chdir = a
        elif o in ('-f', '--file'):
            self.config_file = a
        elif o in ('--fmt', '--format'):
            format = a
        elif o in ('-?', '-h', '--help'):
            self.do_help(['help', 'time'])
            sys.exit(0)
        elif o in ('-p', '--prefix'):
            self.prefix = a
        elif o in ('-t', '--tail'):
            tail = int(a)
        elif o in ('--title',):
            self.title = a
        elif o in ('--which',):
            # Validate against the known timer names before accepting.
            if not a in self.time_strings.keys():
                sys.stderr.write('%s: time: Unrecognized timer "%s".\n' % (self.name, a))
                sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
            which = a
    if self.config_file:
        # Configuration files are executed as Python; only trusted
        # files should be passed here.
        HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
    if self.chdir:
        os.chdir(self.chdir)
        logfile_path = lambda x: os.path.join(self.chdir, x)
    if not args:
        # No explicit files: collect prefix*.log, keeping only the
        # last *tail* files when --tail was given.
        pattern = '%s*.log' % self.prefix
        args = self.args_to_files([pattern], tail)
        if not args:
            if self.chdir:
                directory = self.chdir
            else:
                directory = os.getcwd()
            sys.stderr.write('%s: time: No arguments specified.\n' % self.name)
            sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
            sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
    else:
        args = self.args_to_files(args, tail)
    # NOTE(review): cwd_ appears unused in this method; left untouched.
    cwd_ = os.getcwd() + os.sep
    if format == 'ascii':
        columns = ("Total", "SConscripts", "SCons", "commands")
        self.ascii_table(args, columns, self.get_debug_times, logfile_path)
    elif format == 'gnuplot':
        results = self.collect_results(args, self.get_debug_times,
                                       self.time_strings[which])
        self.gnuplot_results(results, fmt='%s %.6f')
    else:
        sys.stderr.write('%s: time: Unknown format "%s".\n' % (self.name, format))
        sys.exit(1)
if __name__ == '__main__':
    # Global options only; everything from the first non-option argument
    # on is handed to the subcommand dispatcher.
    opts, args = getopt.getopt(sys.argv[1:], 'h?V', ['help', 'version'])
    ST = SConsTimer()
    for o, a in opts:
        if o in ('-?', '-h', '--help'):
            ST.do_help(['help'])
            sys.exit(0)
        elif o in ('-V', '--version'):
            sys.stdout.write('scons-time version\n')
            sys.exit(0)
    if not args:
        sys.stderr.write('Type "%s help" for usage.\n' % ST.name)
        sys.exit(1)
    ST.execute_subcommand(args)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
################################################################################
##
## Photivo
##
## Copyright (C) 2013 Jos De Laender <jos@de-laender.be>
##
## This file is part of Photivo.
##
## Photivo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3
## as published by the Free Software Foundation.
##
## Photivo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Photivo. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os
import sys
import platform
import fnmatch
import glob
import atexit
################################################################################
# Minimum library/tool versions required to build Photivo.
ptMinGCCVersion = '4.6.0'
ptMinQtVersion = '4.7.0'
ptMinGMVersion = '1.3.12'
ptMinGMWandVersion = ptMinGMVersion
ptMinExiv2Version = '0.19'
ptMinLcms2Version = '2.1'
ptMinGlib2Version = '2.18'
ptMinLensfunVersion = '0.2.5'
ptMinFftw3Version = '3.2.2'
ptMinLqr1Version = '0.4.1'
ptMinGimp20Version = '2.6.10' # only when gimp plugin
# Custom libjpeg checks (via ptCheckLibJpeg). Has no pkg-config equivalent.
ptMinLibJpegVersion = 62
ptMaxLibJpegVersion = 80 # Until notice of problem we allow also up to jpeg 8.0
################################################################################
# Supported target/host platforms and target architectures.
ptPlatforms = ['darwin','posix','win32']
ptArchitectures = ['x86','x86_64']
################################################################################
# Clean exit and exit logging.
# Registered first so it runs last (atexit handlers run in reverse order).
atexit.register(ptLastCalledAtExit)
################################################################################
# Announce ourselves as the build program.
print ''
print ptBoldYellow + \
      'This is the scons build program for Photivo.\n' + \
      'Copyright (C) 2013 Jos De Laender <jos@de-laender.be>' + \
      ptNoAttrs;
print ''
# Help, options and variables boiler plate.
HelpText = '''
Usage : scons [-Q] [--ptVerbose] [--ptVerboseConfig] [--ptBuildConfFile=FILE] [install]
-Q : Quiet about reading/building progress messages.
(default : not quiet)
--ptVerbose : Verbose output of progress during compile.
(default : not verbose)
--ptVerboseConfig : Verbose output of progress during config.
(default : not verbose)
--ptBuildConfFile : File that describes the build parameters.
(default = DefaultBuild.py)
install : Install in directory (defined by PT_INSTALL_PATH)
'''
Help(HelpText)
# --ptBuildConfFile : python file holding the build parameter assignments.
AddOption('--ptBuildConfFile',
          dest = 'ptBuildConfFile',
          type = 'string',
          nargs = 1,
          action = 'store',
          metavar = 'FILE',
          default = 'BuildConfs/DefaultBuild.py')
AddOption('--ptVerbose',
          dest = 'ptVerbose',
          action = 'store_true',
          default = False)
AddOption('--ptVerboseConfig',
          dest = 'ptVerboseConfig',
          action = 'store_true',
          default = False)
ptBuildConfFile = GetOption('ptBuildConfFile')
ptVerbose = GetOption('ptVerbose')
ptVerboseConfig = GetOption('ptVerboseConfig')
print ptBoldCyan + \
      'Reading build configuration from \'' + ptBuildConfFile + '\'' + \
      ptNoAttrs
# Use of simple file input (without 'Variables()' and command line input)
# enables a simpler and more correct guessing of values in more
# complex cases of local qt, gcc, etc ..
if not os.path.exists(ptBuildConfFile):
    print ptBoldRed + \
          'No such ptBuildConfFile : ' , ptBuildConfFile , \
          ptNoAttrs
    print ptNoAttrs + HelpText
    Exit(1)
# Keys that a build configuration file is allowed to set.
ptValidOptions = ['CC',
                  'CXX',
                  'PT_BUILD_CONF_NAME',
                  'PT_CROSS',
                  'PT_HOST_PLATFORM',
                  'PT_INSTALL_MODE',
                  'PT_INSTALL_PATH',
                  'PT_LOGFILE_NAME',
                  'PT_OMP',
                  'PT_RELEASE',
                  'PT_TARGET_ARCHITECTURE',
                  'PT_TARGET_PLATFORM',
                  'PT_TOOLS_DIR',
                  'PT_WITH_CONSOLE',
                  'PT_WITH_FILEMGR',
                  'PT_WITH_GIMPPLUGIN',
                  'PT_WITH_SYSTEMCIMG',
                  'PKG_CONFIG_PATH',
                  'QT4DIR']
# Defaults.
ptBuildValues = {'PT_BUILD_CONF_NAME' : 'Build',
                 'PT_CROSS' : '',
                 'PT_INSTALL_MODE' : 'Original',
                 'PT_INSTALL_PATH' : '/opt/Photivo',
                 'PT_OMP' : True,
                 'PT_RELEASE' : True,
                 'PT_WITH_FILEMGR' : False,
                 'PT_WITH_GIMPPLUGIN' : False,
                 'PT_WITH_SYSTEMCIMG' : False,
                 'PT_WITH_CONSOLE' : False}
# Read them from file.
# NOTE(review): this executes the configuration file as Python code; only
# trusted build configuration files may be used here.
exec open(ptBuildConfFile, 'rU').read() in {}, ptBuildValues
#for key,value in ptBuildValues.items():
#  print key + ' => ' + str(value)
# A default environment to start from.
ptDefaultEnv = Environment(CC = 'gcc', CXX = 'g++')
# For later reference. The unaltered one.
ptDefaultEnv['PT_DEFAULT_PATH'] = ptDefaultEnv['ENV']['PATH']
# In case of mingw we pull in lots of the environment.
# Forget the "isolated" environment in this case, but that's a minor issue
# I guess in this context. Unless someone would start to crosscompile under
# mingw, but I don't believe we are going it to make as insane as that ..
if sys.platform in ['win32'] :
    print ptBoldBlue + \
          'I seem to be running on a windows platform. ' + \
          'Please note I assume to work under MSYS ' + \
          'set up as in the wiki. Anything else will ' + \
          'currently fail.' + \
          ptNoAttrs
    ptOsEnv = dict(os.environ)
    # Path from MSYS, converted to backslash form for the win32 tools.
    ptDefaultEnv['ENV']['PATH'] = \
        ptDefaultEnv['ENV']['PATH'] + \
        os.pathsep + \
        ptOsEnv['PATH'].replace("/","\\")
    # Additional flags from MSYS (see wiki)
    ptDefaultEnv.MergeFlags(ptDefaultEnv.ParseFlags(ptOsEnv['LDFLAGS']))
    ptDefaultEnv.MergeFlags(ptDefaultEnv.ParseFlags(ptOsEnv['CFLAGS']))
    ptDefaultEnv.MergeFlags(ptDefaultEnv.ParseFlags(ptOsEnv['CXXFLAGS']))
    # Additional PKG_CONFIG_PATH from MSYS (see wiki)
    if 'PKG_CONFIG_PATH' in ptOsEnv :
        ptOsPkgConfigPath = ptOsEnv['PKG_CONFIG_PATH'].replace("/","\\")
        if not 'PKG_CONFIG_PATH' in ptDefaultEnv['ENV']:
            ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = ptOsPkgConfigPath
        else:
            ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = \
                ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] + os.pathsep + ptOsPkgConfigPath
    # Local MSYS compatible workaround for long link line.
    ptDefaultEnv['TEMPFILE'] = ptTempFileMunge
    ptDefaultEnv['LINKCOM'] = '${TEMPFILE("%s")}' % (ptDefaultEnv['LINKCOM'])
#print ptDefaultEnv.Dump()
# Do we have CC and CXX ?
if (ptDefaultEnv['CC'] == None) :
print ptBoldRed + 'CC not defined' + ptNoAttrs
print ptBoldRed + 'Giving up' + ptNoAttrs
Exit(1)
if (ptDefaultEnv['CXX'] == None) :
print ptBoldRed + 'CXX not defined' + ptNoAttrs
print ptBoldRed + 'Giving up' + ptNoAttrs
Exit(1)
# Throw everything that we recognize in the environment, overwriting.
for ptBuildKey,ptBuildValue in ptBuildValues.items():
if ptBuildKey in ptValidOptions:
ptDefaultEnv[ptBuildKey] = ptBuildValues[ptBuildKey]
else:
print ptBoldRed + \
'No such option : ' + ptBuildKey + \
' while reading ' + ptBuildConfFile + \
ptNoAttrs
print ptNoAttrs + HelpText
Exit(1)
# QT4DIR (name compatible with qt4 tool) via qmake if not yet in environment.
if not 'QT4DIR' in ptDefaultEnv:
    # Probe the user's regular environment for a qmake binary and derive
    # the Qt installation root from its location (two levels up).
    ptEnv = Environment(ENV = os.environ)
    qmake = ptEnv.WhereIs('qmake') or ptEnv.WhereIs('qmake-qt4')
    if qmake:
        ptDefaultEnv['QT4DIR'] = os.path.dirname(os.path.dirname(qmake))
    else :
        print ptBoldRed + \
              'No QT4DIR found.' , \
              ptNoAttrs
        Exit(1)
# Check QT4DIR (user can have given wrong one)
if not os.path.isdir(ptDefaultEnv['QT4DIR']):
    print ptBoldRed + \
          'QT4DIR (' + ptDefaultEnv['QT4DIR'] + ') does not exist.' , \
          ptNoAttrs
    Exit(1)
# PT_TOOLS_DIR detection. If not yet in environment.
# Derived from the location of the configured C compiler.
if not 'PT_TOOLS_DIR' in ptDefaultEnv:
    cc = ptDefaultEnv.WhereIs(ptDefaultEnv['CC'])
    if cc:
        ptDefaultEnv['PT_TOOLS_DIR'] = os.path.dirname(cc)
    else :
        print ptBoldRed + \
              'No PT_TOOLS_DIR found.' , \
              ptNoAttrs
        Exit(1)
# Check PT_TOOLS_DIR (user can have given wrong one)
if not os.path.isdir(ptDefaultEnv['PT_TOOLS_DIR']):
    print ptBoldRed + \
          'PT_TOOLS_DIR (' + ptDefaultEnv['PT_TOOLS_DIR'] + \
          ') does not exist.' , \
          ptNoAttrs
    Exit(1)
# PT_LOGFILE_NAME defaults to '<build conf name>.log'.
if not 'PT_LOGFILE_NAME' in ptDefaultEnv:
    ptDefaultEnv['PT_LOGFILE_NAME'] = ptDefaultEnv['PT_BUILD_CONF_NAME'] + '.log'
# Check PT_INSTALL_PATH (not checked on win32).
if not sys.platform in ['win32'] :
    if not os.path.isdir(ptDefaultEnv['PT_INSTALL_PATH']):
        print ptBoldRed + \
              'PT_INSTALL_PATH (' + ptDefaultEnv['PT_INSTALL_PATH'] + \
              ') does not exist.' , \
              ptNoAttrs
        Exit(1)
# Target and host platform. Normally PLATFORM.
if not 'PT_TARGET_PLATFORM' in ptDefaultEnv:
    ptDefaultEnv['PT_TARGET_PLATFORM'] = ptDefaultEnv['PLATFORM']
if not ptDefaultEnv['PT_TARGET_PLATFORM'] in ptPlatforms :
    print ptBoldRed + \
          'PT_TARGET_PLATFORM (' + ptDefaultEnv['PT_TARGET_PLATFORM'] + \
          ') should be in ' + str(ptPlatforms) + '.' + \
          ptNoAttrs
    Exit(1)
if not 'PT_HOST_PLATFORM' in ptDefaultEnv:
    ptDefaultEnv['PT_HOST_PLATFORM'] = ptDefaultEnv['PLATFORM']
if not ptDefaultEnv['PT_HOST_PLATFORM'] in ptPlatforms :
    print ptBoldRed + \
          'PT_HOST_PLATFORM (' + ptDefaultEnv['PT_HOST_PLATFORM'] + \
          ') should be in ' + str(ptPlatforms) + '.' + \
          ptNoAttrs
    Exit(1)
# Target and host architecture.
# Default derived from the python interpreter's architecture.
if not 'PT_TARGET_ARCHITECTURE' in ptDefaultEnv:
    ptArch = platform.architecture()[0]
    if ptArch == '32bit' :
        ptDefaultEnv['PT_TARGET_ARCHITECTURE'] = 'x86'
    if ptArch == '64bit' :
        ptDefaultEnv['PT_TARGET_ARCHITECTURE'] = 'x86_64'
if not ptDefaultEnv['PT_TARGET_ARCHITECTURE'] in ptArchitectures :
    print ptBoldRed + \
          'PT_TARGET_ARCHITECTURE (' + ptDefaultEnv['PT_TARGET_ARCHITECTURE'] + \
          ') should be in ' + str(ptArchitectures) + '.' + \
          ptNoAttrs
    Exit(1)
################################################################################
# Opening of LogFile
if not ptDefaultEnv['PT_LOGFILE_NAME']:
    ptDefaultEnv['PT_LOGFILE_NAME'] = ptDefaultEnv['PT_BUILD_CONF_NAME'] + '.log'
ptLogFile = open(ptDefaultEnv['PT_LOGFILE_NAME'],'w',1) # Line buffered
ptDefaultEnv['PT_LOGFILE'] = ptLogFile
# I hope to duplicate compile errors (via stderr) into the log this way.
# TODO Find some win32 equivalent.
if not sys.platform in ['win32'] :
    sys.stderr = os.popen('tee stderr.log','w')
atexit.register(ptAtExit,ptLogFile)
################################################################################
# Some extra derived environment.
# Spawn with stdout/stderr echoing from the child.
ptDefaultEnv['SPAWN'] = ptEchoSpawn
# Needed for above.
ptDefaultEnv['PT_VERBOSE'] = ptVerbose
ptDefaultEnv['PT_VERBOSECONFIG'] = ptVerboseConfig
# Extend the CC/CXX names for cross. XXX JDLA More might be needed. TODO
# PT_CROSS is the cross-compile prefix, e.g. 'i686-w64-mingw32-'.
ptDefaultEnv['CC'] = ptDefaultEnv['PT_CROSS'] + ptDefaultEnv['CC']
ptDefaultEnv['CXX'] = ptDefaultEnv['PT_CROSS'] + ptDefaultEnv['CXX']
# Extend PATH with the found PT_TOOLS_DIR
ptDefaultEnv['ENV']['PATH'] = \
    ptDefaultEnv['PT_TOOLS_DIR'] + os.pathsep + ptDefaultEnv['ENV']['PATH']
# Add or extend PKG_CONFIG_PATH
# Assuming that it is only needed if QT4DIR is 'non standard'
ptQtBin = os.path.join(str(ptDefaultEnv['QT4DIR']),'bin')
if not ptQtBin in ptDefaultEnv['PT_DEFAULT_PATH']:
    ptPkgConfigPath = \
        os.path.join(os.path.join(str(ptDefaultEnv['QT4DIR']),'lib'),'pkgconfig')
    if not 'PKG_CONFIG_PATH' in ptDefaultEnv['ENV']:
        ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = ptPkgConfigPath
    else :
        ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] = \
            ptDefaultEnv['ENV']['PKG_CONFIG_PATH'] + os.pathsep + ptPkgConfigPath
################################################################################
# Options summary so far.
# Each entry goes through ptPrintLog; console echo only with --ptVerboseConfig.
ptDoPrint = False
if ptVerboseConfig:
    ptDoPrint = True
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'Configuration file : ' + ptBuildConfFile)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CC : ' + str(ptDefaultEnv['CC']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'CXX : ' + str(ptDefaultEnv['CXX']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_BUILD_CONF_NAME : ' + str(ptDefaultEnv['PT_BUILD_CONF_NAME']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_CROSS : ' + str(ptDefaultEnv['PT_CROSS']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_HOST_PLATFORM : ' + str(ptDefaultEnv['PT_HOST_PLATFORM']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_INSTALL_PATH : ' + str(ptDefaultEnv['PT_INSTALL_PATH']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_LOGFILE_NAME : ' + str(ptDefaultEnv['PT_LOGFILE_NAME']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_OMP : ' + str(ptDefaultEnv['PT_OMP']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_RELEASE : ' + str(ptDefaultEnv['PT_RELEASE']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_TARGET_ARCHITECTURE : ' + str(ptDefaultEnv['PT_TARGET_ARCHITECTURE']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_TARGET_PLATFORM : ' + str(ptDefaultEnv['PT_TARGET_PLATFORM']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_TOOLS_DIR : ' + str(ptDefaultEnv['PT_TOOLS_DIR']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_CONSOLE : ' + str(ptDefaultEnv['PT_WITH_CONSOLE']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_FILEMGR : ' + str(ptDefaultEnv['PT_WITH_FILEMGR']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_GIMPPLUGIN : ' + str(ptDefaultEnv['PT_WITH_GIMPPLUGIN']))
# BUGFIX: the label below used to read 'PT_WITH_SYTSTEMCIMG' while the
# value printed is PT_WITH_SYSTEMCIMG; the label now matches the key.
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'PT_WITH_SYSTEMCIMG : ' + str(ptDefaultEnv['PT_WITH_SYSTEMCIMG']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'ENV[PATH] : ' + str(ptDefaultEnv['ENV']['PATH']))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'ENV[PKG_CONFIG_PATH] : ' + str(ptDefaultEnv['ENV'].get('PKG_CONFIG_PATH')))
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'QT4DIR : ' + str(ptDefaultEnv['QT4DIR']))
################################################################################
# Windows executables carry the .exe suffix.
if ptDefaultEnv['PT_TARGET_PLATFORM'] == 'win32' :
    ptDefaultEnv['PROGSUFFIX'] = '.exe'
################################################################################
# Minimum compiler version check.
if not ptCheckGCCVersion(ptDefaultEnv,ptMinGCCVersion) :
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'GCC >= ' + ptMinGCCVersion + ' not found.')
    # Report the versions that were actually found before giving up.
    ptVersion = ptGetGCCVersion(ptDefaultEnv)
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found GCC : ' + ptVersion[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found G++ : ' + ptVersion[1])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
################################################################################
# Check for libraries pkg-config and Qt version.
# The custom_tests dict plugs this script's own probe functions into the
# SCons Configure context so they can be called as ptConf.<name>().
ptConf = Configure(ptDefaultEnv,
                   custom_tests =
                   {'ptCheckPKGConfig' : ptCheckPKGConfig,
                    'ptCheckPKG' : ptCheckPKG ,
                    'ptCheckQt' : ptCheckQt ,
                    'ptCheckLibWithHeader' : ptCheckLibWithHeader,
                    'ptCheckHg' : ptCheckHg,
                    'ptCheckLibJpeg' : ptCheckLibJpeg,
                    'ptGetPKGOutput' : ptGetPKGOutput,
                    'ptGetQtOutput' : ptGetQtOutput})
# hg check
if not ptConf.ptCheckHg():
    ptPrintLog(True,ptLogFile,ptBoldRed,'Mercurial (hg) not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# Version we are building
ptAppVersion = ptGetAppVersion()
# jpeg check. Note header file might be tricky and need tweak !
if not ptConf.ptCheckLibWithHeader('jpeg','jpeglib.h','cxx'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'Library jpeg (or jpeglib.h) not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'It is not unusual you have to add \n'
               '"#include <stdlib.h>" and "#include <stdio.h>" \n'
               'to your "jpeglib.h".')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# Additional custom test on jpeg lib version.
# TODO Check doesn't work for CROSS (can't execute it on host ..)
if ptDefaultEnv['PT_TARGET_PLATFORM'] == ptDefaultEnv['PT_HOST_PLATFORM'] :
    if not ptConf.ptCheckLibJpeg(ptMinLibJpegVersion,ptMaxLibJpegVersion):
        ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
        Exit(1)
# png check.
if not ptConf.ptCheckLibWithHeader('png','png.h','cxx'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'Library png not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# tiff check.
if not ptConf.ptCheckLibWithHeader('tiff','tiff.h','cxx'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'Library tiff not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# pkg-config check. (does 'cross' behind the scenes).
if not ptConf.ptCheckPKGConfig('0.25'):
    ptPrintLog(True,ptLogFile,ptBoldRed,'pkg-config >= 0.25 not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
# lensfun check.
# On success the version string and compile flags are kept for later.
if not ptConf.ptCheckPKG('lensfun >= ' + ptMinLensfunVersion):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'lensfun >= ' + ptMinLensfunVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('lensfun')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptLensfunVersionString,ptLensfunFlags] = ptConf.ptGetPKGOutput('lensfun')
# fftw3 check.
# Each pkg-config check below follows the same pattern: fail hard with a
# report of what was found, or keep the version string and flags.
if not ptConf.ptCheckPKG('fftw3 >= ' + ptMinFftw3Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'fftw3 >= ' + ptMinFftw3Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('fftw3')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptFftw3VersionString,ptFftw3Flags] = ptConf.ptGetPKGOutput('fftw3')
# lqr-1 check.
if not ptConf.ptCheckPKG('lqr-1 >= ' + ptMinLqr1Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'lqr-1 >= ' + ptMinLqr1Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('lqr-1')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptLqr1VersionString,ptLqr1Flags] = ptConf.ptGetPKGOutput('lqr-1')
# glib-2.0 check.
if not ptConf.ptCheckPKG('glib-2.0 >= ' + ptMinGlib2Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'glib-2.0 >= ' + ptMinGlib2Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('glib-2.0')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptGlib2VersionString,ptGlib2Flags] = ptConf.ptGetPKGOutput('glib-2.0')
# exiv2 check.
if not ptConf.ptCheckPKG('exiv2 >= ' + ptMinExiv2Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'exiv2 >= ' + ptMinExiv2Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('exiv2')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptExiv2VersionString,ptExiv2Flags] = ptConf.ptGetPKGOutput('exiv2')
# lcms2 check.
if not ptConf.ptCheckPKG('lcms2 >= ' + ptMinLcms2Version):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'lcms2 >= ' + ptMinLcms2Version + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('lcms2')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptLcms2VersionString,ptLcms2Flags] = ptConf.ptGetPKGOutput('lcms2')
# GraphicsMagick check.
if not ptConf.ptCheckPKG('GraphicsMagick++ >= ' + ptMinGMVersion):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'Magick++ >= ' + ptMinGMVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('GraphicsMagick++')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptGMVersionString,ptGMFlags] = ptConf.ptGetPKGOutput('GraphicsMagick++')
# GraphicsMagickWand check.
if not ptConf.ptCheckPKG('GraphicsMagickWand >= ' + ptMinGMWandVersion):
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'MagickWand >= ' + ptMinGMWandVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetPKGOutput('GraphicsMagickWand')[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptGMWandVersionString,ptGMWandFlags] = ptConf.ptGetPKGOutput('GraphicsMagickWand')
# QT check.
if not ptConf.ptCheckQt(ptMinQtVersion) :
    ptPrintLog(True,ptLogFile, ptBoldRed,
               'Qt >= ' + ptMinQtVersion + ' not found.')
    ptPrintLog(True,ptLogFile,ptBoldRed,
               'Found : ' + ptConf.ptGetQtOutput()[0])
    ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
    Exit(1)
else :
    [ptQtVersionString,ptQtFlags] = ptConf.ptGetQtOutput()
# libgimp check in case we are working with GIMPPLUGIN
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    if not ptConf.ptCheckPKG('gimp-2.0 >= ' + ptMinGimp20Version):
        ptPrintLog(True,ptLogFile, ptBoldRed,
                   'gimp-2.0 >= ' + ptMinGimp20Version + ' not found.')
        ptPrintLog(True,ptLogFile,ptBoldRed,
                   'Found : ' + ptConf.ptGetPKGOutput('gimp-2.0')[0])
        ptPrintLog(True,ptLogFile,ptBoldRed,'Giving up.')
        Exit(1)
    else :
        [ptGimp20VersionString,ptGimp20Flags] = ptConf.ptGetPKGOutput('gimp-2.0')
# Some functions check.
if ptConf.CheckFunc('getc_unlocked'):
    ptConf.env.Append(CPPDEFINES = ['-DHAVE_GETC_UNLOCKED'])
if ptConf.CheckFunc('ftello'):
    ptConf.env.Append(CPPDEFINES = ['-DHAVE_FTELLO'])
# Version defines.
ptConf.env.Append(CPPDEFINES = ['-DAPPVERSION=\'' + ptAppVersion + '\''])
# Prefix defines.
ptConf.env.Append(CPPDEFINES = \
    ['-DPREFIX=\'' + ptDefaultEnv['PT_INSTALL_PATH'] + '\''])
# System CIMG
if ptDefaultEnv['PT_WITH_SYSTEMCIMG']:
    ptConf.env.Append(CPPDEFINES = ['-DSYSTEM_CIMG'])
# FileMgr
if not ptDefaultEnv['PT_WITH_FILEMGR']:
    ptConf.env.Append(CPPDEFINES = ['-DPT_WITHOUT_FILEMGR'])
# Finalize configuration
ptConf.Finish()
# Show summary results.
# Echoed to the console only with --ptVerboseConfig, logged always.
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lensfun version : ' + ptLensfunVersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'fftw3 version : ' + ptFftw3VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lqr-1 version : ' + ptLqr1VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'glib-2.0 version : ' + ptGlib2VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'exiv2 version : ' + ptExiv2VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lcms2 version : ' + ptLcms2VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM version : ' + ptGMVersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM Wand version : ' + ptGMWandVersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'Qt version : ' + ptQtVersionString)
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
               'Gimp20 version : ' + ptGimp20VersionString)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lensfun flags : ' + ptLensfunFlags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'fftw3 flags : ' + ptFftw3Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lqr-1 flags : ' + ptLqr1Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'glib-2.0 flags : ' + ptGlib2Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'exiv2 flags : ' + ptExiv2Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'lcms2 flags : ' + ptLcms2Flags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM flags : ' + ptGMFlags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'GM Wand flags : ' + ptGMWandFlags)
ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
           'Qt flags : ' + ptQtFlags)
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    ptPrintLog(ptDoPrint,ptLogFile,ptBoldMagenta,
               'Gimp20 flags : ' + ptGimp20Flags)
################################################################################
# Parse all the flags collected up to now.
# Each pkg-config flag string is parsed into SCons variables and merged
# into the default environment.
ptParsedLensfunFlags = ptDefaultEnv.ParseFlags(ptLensfunFlags)
ptDefaultEnv.MergeFlags(ptParsedLensfunFlags)
ptParsedFftw3Flags = ptDefaultEnv.ParseFlags(ptFftw3Flags)
ptDefaultEnv.MergeFlags(ptParsedFftw3Flags)
ptParsedLqr1Flags = ptDefaultEnv.ParseFlags(ptLqr1Flags)
ptDefaultEnv.MergeFlags(ptParsedLqr1Flags)
ptParsedGlib2Flags = ptDefaultEnv.ParseFlags(ptGlib2Flags)
ptDefaultEnv.MergeFlags(ptParsedGlib2Flags)
ptParsedExiv2Flags = ptDefaultEnv.ParseFlags(ptExiv2Flags)
ptDefaultEnv.MergeFlags(ptParsedExiv2Flags)
ptParsedLcms2Flags = ptDefaultEnv.ParseFlags(ptLcms2Flags)
ptDefaultEnv.MergeFlags(ptParsedLcms2Flags)
ptParsedGMFlags = ptDefaultEnv.ParseFlags(ptGMFlags)
ptDefaultEnv.MergeFlags(ptParsedGMFlags)
ptParsedGMWandFlags = ptDefaultEnv.ParseFlags(ptGMWandFlags)
ptDefaultEnv.MergeFlags(ptParsedGMWandFlags)
ptParsedQtFlags = ptDefaultEnv.ParseFlags(ptQtFlags)
ptDefaultEnv.MergeFlags(ptParsedQtFlags)
if ptDefaultEnv['PT_WITH_GIMPPLUGIN']:
    ptParsedGimp20Flags = ptDefaultEnv.ParseFlags(ptGimp20Flags)
    ptDefaultEnv.MergeFlags(ptParsedGimp20Flags)
################################################################################
# Command printing via a wrapper function for decorating and logging.
# After the configure checks, in order not to pollute the log.
ptDefaultEnv['PRINT_CMD_LINE_FUNC'] = ptPrintCmdLine
################################################################################
# Pure for scons printing recognition.
ptDefaultEnv.Append(CXXFLAGS = ['-DSCONS_CXX'])
ptDefaultEnv.Append(CCFLAGS = ['-DSCONS_CC'])
ptDefaultEnv.Append(LINKFLAGS = ['-DSCONS_LINK'])
################################################################################
# Common settings for compiler and linker.
ptDefaultEnv.Append(CCFLAGS = ['-ffast-math'])
ptDefaultEnv.Append(CCFLAGS = ['-Wall'])
ptDefaultEnv.Append(CCFLAGS = ['-Werror'])
ptDefaultEnv.Append(CCFLAGS = ['-Wextra'])
ptDefaultEnv.Append(CXXFLAGS = ['-std=gnu++0x'])
if ptDefaultEnv['PT_OMP']:
    # OpenMP needs both the compile flag and the runtime libraries.
    ptDefaultEnv.Append(CCFLAGS = ['-fopenmp'])
    ptDefaultEnv.Append(LIBS = ['gomp','pthread'])
if ptDefaultEnv['PT_RELEASE'] == True:
    ptDefaultEnv.Append(CCFLAGS = ['-O3'])
    ptDefaultEnv.Append(CCFLAGS = ['-funroll-loops', '-ftree-vectorize'])
    ptDefaultEnv.Append(CCFLAGS = ['-DQT_NO_DEBUG'])
    # BUGFIX: this used to read "not in 'darwin'", a substring test
    # against the string 'darwin' that only worked by accident for the
    # supported platform names. Compare for inequality instead.
    if ptDefaultEnv['PT_TARGET_PLATFORM'] != 'darwin' :
        ptDefaultEnv.Append(LINKFLAGS = ['-Wl,-O1'])
else:
    ptDefaultEnv.Append(CCFLAGS = ['-g'])
    ptDefaultEnv.Append(CCFLAGS = ['-DQT_DEBUG'])
    # Same substring-test fix as above.
    if ptDefaultEnv['PT_TARGET_PLATFORM'] != 'darwin' :
        ptDefaultEnv.Append(CCFLAGS = ['-O1'])
if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['win32'] :
    ptDefaultEnv.Append(LIBS = ['ole32','wsock32','expat','gdi32','iconv'])
    if ptDefaultEnv['PT_WITH_CONSOLE'] == True:
        ptDefaultEnv.Append(LINKFLAGS = ['-Wl,-subsystem,console'])
    else:
        ptDefaultEnv.Append(LINKFLAGS = ['-Wl,-subsystem,windows'])
    if ptDefaultEnv['PT_TARGET_ARCHITECTURE'] not in ['x86_64'] :
        # This can go wild ? XXX JDLA We set it i686 without actually knowing ?
        ptDefaultEnv.Append(CCFLAGS = ['-march=i686'])
################################################################################
# Make a qt4 env.
# XXX JDLA TODO Not fully understood why needed : in any
# case when not doing so, .qrc (rcc) fails to be recognized ...
ptQtEnv = ptDefaultEnv.Clone();
ptQtEnv.Tool('qt4')
################################################################################
# Subsidiary scripts in a variant build.
SConscript(os.path.join('Sources','SConscript'),
           variant_dir = os.path.join('Build',
                                      os.path.join(ptDefaultEnv['PT_BUILD_CONF_NAME'],
                                                   'Build_Photivo')),
           exports = 'ptQtEnv')
################################################################################
# Install
if ptDefaultEnv['PT_INSTALL_MODE'] == 'Original' :
    # Parallel lists: ptOrgList[i] gets installed as ptTgtList[i].
    ptOrgList = []
    ptTgtList = []
    # binaries.
    if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['darwin','posix'] :
        ptOrgList += ['photivo']
        ptOrgList += ['ptClear']
        ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/photivo']
        ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/ptclear']
    if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['win32']:
        ptOrgList += ['photivo.exe']
        ptOrgList += ['ptClear.exe']
        ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/photivo.exe']
        ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + '/bin/ptclear.exe']
    # desktop. (twice : also in .local
    ptOrgList += ['ReferenceMaterial/photivo.desktop']
    ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + \
                  '/share/applications/photivo.desktop']
    ptOrgList += ['ReferenceMaterial/photivo.desktop']
    ptTgtList += ['~/.local/share/applications/photivo.desktop']
    # icon.
    ptOrgList += ['qrc/photivo-appicon.png']
    ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + \
                  '/share/pixmap/photivo-appicon.png']
    # Curves etc ..
    # Every file under these data directories is installed recursively.
    for Dir in ['Curves','ChannelMixers','Presets','Profiles','Translations',
                'LensfunDatabase','UISettings','Themes']:
        for Root,DirNames,FileNames in os.walk(Dir):
            for FileName in FileNames:
                ptOrgList += [os.path.join(Root,FileName)]
                ptTgtList += [ptDefaultEnv['PT_INSTALL_PATH'] + \
                              '/share/photivo/' + os.path.join(Root,FileName)]
    # Install alias wired only for a native posix build.
    if ptDefaultEnv['PT_HOST_PLATFORM'] == 'posix' and \
       ptDefaultEnv['PT_TARGET_PLATFORM'] == 'posix' :
        ptDefaultEnv.Alias('install',ptDefaultEnv['PT_INSTALL_PATH'])
        ptDefaultEnv.InstallAs(ptTgtList,ptOrgList)
################################################################################
# import script for building .app bundle
# XXX JDLA TODO : Integrate better.
if ptDefaultEnv['PT_TARGET_PLATFORM'] in ['darwin'] :
    import osx_app_bundle
################################################################################
| Python |
"""SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Util.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import sys
import copy
import re
import types
from collections import UserDict, UserList, UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
# NOTE(review): types.InstanceType exists only on Python 2 (old-style class
# instances); this line raises AttributeError on Python 3 -- confirm which
# interpreter this tree actually targets.
InstanceType = types.InstanceType
MethodType = types.MethodType
FunctionType = types.FunctionType

# UnicodeType is the `unicode` builtin on Python 2, or None where that
# builtin does not exist (Python 3, where str is already unicode).
try: unicode
except NameError: UnicodeType = None
else: UnicodeType = unicode
def dictify(keys, values, result=None):
    """Build a dictionary by pairing *keys* with *values* positionally.

    keys   -- sequence of dictionary keys
    values -- sequence of values, consumed in lockstep with keys
    result -- optional existing dict to update in place; a fresh dict
              is created when omitted

    Returns the (possibly newly created) dict.

    Bug fix: the old signature used a mutable default (result={}), so
    every defaulted call shared -- and accumulated entries into -- a
    single module-level dict.
    """
    if result is None:
        result = {}
    for k, v in zip(keys, values):
        result[k] = v
    return result
# _altsep is the platform's secondary path separator, or None where there is
# only one. Fall back to '/' on win32 Pythons that fail to set os.altsep.
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
    # My ActivePython 2.0.1 doesn't set os.altsep! What gives?
    _altsep = '/'
if _altsep:
    def rightmost_separator(path, sep):
        """Index of the right-most occurrence of either path separator
        in *path*; -1 when neither occurs."""
        return max(path.rfind(sep), path.rfind(_altsep))
else:
    def rightmost_separator(path, sep):
        """Index of the right-most *sep* in *path*; -1 when absent."""
        return path.rfind(sep)
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
    """Check whether sequence str contains ANY of the items in set."""
    found = 0
    for item in set:
        if item in str:
            found = 1
            break
    return found
def containsAll(str, set):
    """Check whether sequence str contains ALL of the items in set."""
    missing = [item for item in set if item not in str]
    if missing:
        return 0
    return 1
def containsOnly(str, set):
    """Check whether sequence str contains ONLY items in set."""
    strays = [c for c in str if c not in set]
    if strays:
        return 0
    return 1
def splitext(path):
    "Same as os.path.splitext() but faster."
    # A trailing extension only counts when the dot comes after the last
    # path separator and the tail is not purely digits/dots (so a name
    # like 'foo.1.2' keeps its version suffix intact).
    dot = path.rfind('.')
    if dot <= rightmost_separator(path, os.sep):
        return path, ""
    if containsOnly(path[dot:], "0123456789."):
        return path, ""
    return path[:dot], path[dot:]
def updrive(path):
    """
    Make the drive letter (if any) upper case.
    This is useful because Windows is inconsistent on the case
    of the drive letter, which can cause inconsistencies when
    calculating command signatures.
    """
    drive, tail = os.path.splitdrive(path)
    return drive.upper() + tail if drive else path
class NodeList(UserList):
    """A list of Nodes (or any objects) that broadcasts attribute access.

    Fetching an attribute from the list fetches it from every element
    and returns a new NodeList of the results; calling the list calls
    every element. For example:

    >>> someList = NodeList([ ' foo ', ' bar ' ])
    >>> someList.strip()
    [ 'foo', 'bar' ]
    """
    def __nonzero__(self):
        # Python 2 truth value: non-empty means true.
        return len(self.data) != 0

    def __str__(self):
        return ' '.join(map(str, self.data))

    def __iter__(self):
        return iter(self.data)

    def __call__(self, *args, **kwargs):
        # Call every element with the same arguments, collect the results.
        return self.__class__([element(*args, **kwargs)
                               for element in self.data])

    def __getattr__(self, name):
        # Broadcast the attribute fetch across all elements.
        return self.__class__([getattr(element, name)
                               for element in self.data])
# Matches a string that is exactly one environment-variable reference,
# either $FOO or ${FOO}.
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')

def get_environment_var(varstr):
    """Given a string, first determine if it looks like a reference
    to a single environment variable, like "$FOO" or "${FOO}".
    If so, return that variable with no decorations ("FOO").
    If not, return None."""
    match = _get_env_var.match(to_String(varstr))
    if not match:
        return None
    var = match.group(1)
    # Strip the braces from the ${FOO} form.
    return var[1:-1] if var.startswith('{') else var
class DisplayEngine(object):
    """Callable that writes progress text to stdout and can be muted."""
    print_it = True

    def __call__(self, text, append_newline=1):
        if not self.print_it:
            return
        if append_newline:
            text = text + '\n'
        try:
            sys.stdout.write(unicode(text))
        except IOError:
            # Stdout might be connected to a pipe that has been closed by
            # now, most likely because the user pressed ctrl-c and SCons
            # is shutting down. Ignore the error so the shutdown can
            # finish and the .sconsign file is still written correctly.
            pass

    def set_mode(self, mode):
        # mode: truthy enables output, falsy silences it.
        self.print_it = mode
def render_tree(root, child_func, prune=0, margin=None, visited=None):
    """
    Render a tree of nodes into an ASCII tree view.

    root - the root node of the tree
    child_func - the function called to get the children of a node
    prune - don't visit the same node twice
    margin - the format of the left margin to use for children of root.
        1 results in a pipe, and 0 results in no pipe.
    visited - a dictionary of visited nodes in the current branch if not
        prune, or in the whole tree if prune.

    Bug fix: margin and visited used mutable default arguments, so the
    visited dict persisted between unrelated top-level calls -- a second
    prune=1 render of the same tree came back as just "+-[root]".
    Fresh objects are now created per call; explicitly passed arguments
    behave exactly as before.
    """
    if margin is None:
        margin = [0]
    if visited is None:
        visited = {}
    rname = str(root)

    children = child_func(root)
    retval = ""
    # Two-space indent aligns with the two-character "| " pipe margin.
    for pipe in margin[:-1]:
        if pipe:
            retval = retval + "| "
        else:
            retval = retval + "  "

    if rname in visited:
        return retval + "+-[" + rname + "]\n"

    retval = retval + "+-" + rname + "\n"
    if not prune:
        # Copy so sibling branches don't see this branch's visits.
        visited = copy.copy(visited)
    visited[rname] = 1

    for i in range(len(children)):
        # Draw a pipe for every child except the last one.
        margin.append(i < len(children)-1)
        retval = retval + render_tree(children[i], child_func, prune,
                                      margin, visited)
        margin.pop()

    return retval
# IDX collapses any truth value to the integer 0 or 1, so it can index the
# two-character tag strings used below.
IDX = lambda N: N and 1 or 0

def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}):
    """
    Print a tree of nodes. This is like render_tree, except it prints
    lines directly instead of creating a string representation in memory,
    so that huge trees can be printed.

    root - the root node of the tree
    child_func - the function called to get the children of a node
    prune - don't visit the same node twice
    showtags - print status information to the left of each node line
    margin - the format of the left margin to use for children of root.
      1 results in a pipe, and 0 results in no pipe.
    visited - a dictionary of visited nodes in the current branch if not prune,
      or in the whole tree if prune.

    NOTE(review): margin and visited are mutable defaults, so visited
    entries persist across separate top-level calls -- confirm whether
    that cross-call memoization is intended before changing it.
    """
    rname = str(root)

    if showtags:
        if showtags == 2:
            # Print the legend once, before the first tagged line.
            legend = (' E = exists\n' +
                      ' R = exists in repository only\n' +
                      ' b = implicit builder\n' +
                      ' B = explicit builder\n' +
                      ' S = side effect\n' +
                      ' P = precious\n' +
                      ' A = always build\n' +
                      ' C = current\n' +
                      ' N = no clean\n' +
                      ' H = no cache\n' +
                      '\n')
            sys.stdout.write(unicode(legend))

        # Each tag string is indexed by 0 (blank) or 1 (letter); the
        # builder tag distinguishes implicit (b), explicit (B) or both.
        tags = ['[']
        tags.append(' E'[IDX(root.exists())])
        tags.append(' R'[IDX(root.rexists() and not root.exists())])
        tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
                           [0,2][IDX(root.has_builder())]])
        tags.append(' S'[IDX(root.side_effect)])
        tags.append(' P'[IDX(root.precious)])
        tags.append(' A'[IDX(root.always_build)])
        tags.append(' C'[IDX(root.is_up_to_date())])
        tags.append(' N'[IDX(root.noclean)])
        tags.append(' H'[IDX(root.nocache)])
        tags.append(']')

    else:
        tags = []

    def MMM(m):
        # Map a margin flag to its two-character drawing.
        return [" ","| "][m]
    margins = list(map(MMM, margin[:-1]))

    children = child_func(root)

    if prune and rname in visited and children:
        # Already printed once; show a bracketed stub instead.
        sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + u'\n')
        return

    sys.stdout.write(''.join(tags + margins + ['+-', rname]) + u'\n')

    visited[rname] = 1

    if children:
        margin.append(1)
        idx = IDX(showtags)
        # All children but the last keep the pipe; the last closes it.
        for C in children[:-1]:
            print_tree(C, child_func, prune, idx, margin, visited)
        margin[-1] = 0
        print_tree(children[-1], child_func, prune, idx, margin, visited)
        margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.

# We are using the following trick to speed up these
# functions. Default arguments are used to take a snapshot of the
# the global functions and constants used by these functions. This
# transforms accesses to global variable into local variables
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).

DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)

# Note that profiling data shows a speed-up when comparing
# explicitely with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
# NOTE(review): `unicode` is the Python 2 builtin; these two tuples raise
# NameError on Python 3 -- confirm the target interpreter for this tree.
StringTypes = (str, unicode, UserString)

# Empirically, it is faster to check explicitely for str and
# unicode than for basestring.
BaseStringTypes = (str, unicode)

def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
    # True for dict or UserDict (and their subclasses).
    return isinstance(obj, DictTypes)

def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
    # True for list or UserList (and their subclasses).
    return isinstance(obj, ListTypes)

def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
    # True for list, tuple or UserList.
    return isinstance(obj, SequenceTypes)

def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
    # True for tuple only.
    return isinstance(obj, tuple)

def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
    # True for any string-like object (str, unicode, UserString).
    return isinstance(obj, StringTypes)

def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
    # Profiling shows that there is an impressive speed-up of 2x
    # when explicitely checking for strings instead of just not
    # sequence when the argument (i.e. obj) is already a string.
    # But, if obj is a not string then it is twice as fast to
    # check only for 'not sequence'. The following code therefore
    # assumes that the obj argument is a string must of the time.
    return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
               StringTypes=StringTypes, SequenceTypes=SequenceTypes):
    """Recursively append the leaves of *sequence* onto the list *result*.

    Strings count as leaves, not sequences. Mutates *result* in place;
    helper for flatten() and flatten_sequence().
    """
    for element in sequence:
        if isinstance(element, SequenceTypes) and \
           not isinstance(element, StringTypes):
            do_flatten(element, result)
        else:
            result.append(element)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
            SequenceTypes=SequenceTypes, do_flatten=do_flatten):
    """Flatten a sequence to a non-nested list.

    Converts either a single scalar or an arbitrarily nested sequence
    into a flat list. Strings are treated as scalars, unlike ordinary
    Python iteration.
    """
    if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
        return [obj]
    result = []
    for element in obj:
        if isinstance(element, SequenceTypes) and \
           not isinstance(element, StringTypes):
            do_flatten(element, result)
        else:
            result.append(element)
    return result
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
                     SequenceTypes=SequenceTypes, do_flatten=do_flatten):
    """Flatten a sequence to a non-nested list.

    Same as flatten(), but skips the single-scalar check: slightly more
    efficient when the argument is known to be a sequence already.
    """
    result = []
    for element in sequence:
        if isinstance(element, SequenceTypes) and \
           not isinstance(element, StringTypes):
            do_flatten(element, result)
        else:
            result.append(element)
    return result
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
              isinstance=isinstance, str=str,
              UserString=UserString, BaseStringTypes=BaseStringTypes):
    """Convert *s* to a string; real strings and UserString contents
    pass through unchanged, everything else goes through str()."""
    if isinstance(s, BaseStringTypes):
        # Early out when already a string!
        return s
    if isinstance(s, UserString):
        # A UserString wraps exactly one str/unicode object in .data.
        return s.data
    return str(s)
def to_String_for_subst(s,
                        isinstance=isinstance, str=str, to_String=to_String,
                        BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
                        UserString=UserString):
    """Convert *s* to a string for substitution purposes.

    Sequences are converted element-by-element (recursively) and joined
    with single spaces; UserStrings are unwrapped; anything else goes
    through str().

    Bug fix: the sequence branch built the converted list *l* but then
    joined the raw input instead (' '.join(s)), which raises TypeError
    as soon as the sequence contains a non-string element and silently
    skips the recursive conversion even when it does not. Join the
    converted list.
    """
    # Note that the test cases are sorted by order of probability.
    if isinstance(s, BaseStringTypes):
        return s
    elif isinstance(s, SequenceTypes):
        l = []
        for e in s:
            l.append(to_String_for_subst(e))
        return ' '.join(l)
    elif isinstance(s, UserString):
        # s.data can only be either a unicode or a regular
        # string. Please see the UserString initializer.
        return s.data
    else:
        return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
                            AttributeError=AttributeError):
    """Stringify *obj* for signature calculation, preferring the
    object's own for_signature() method when it provides one."""
    try:
        f = obj.for_signature
    except AttributeError:
        return to_String_for_subst(obj)
    return f()
# The SCons "semi-deep" copy.
#
# Makes separate copies of lists, dicts and tuples (including their
# UserList/UserDict equivalents) but shares references to everything
# else it finds. An object can customize -- or veto -- its copy by
# providing a __semi_deepcopy__() method (BuilderDict uses this to
# forbid copying). The dispatch-table approach is a direct rip-off
# from the normal Python copy module.
_semi_deepcopy_dispatch = d = {}

def semi_deepcopy_dict(x, exclude = [] ):
    """Semi-deep-copy dict *x*, skipping any keys listed in *exclude*.

    Keys are shared, not copied; the stdlib deepcopy would copy them
    too (copy[semi_deepcopy(key)] = ...) but that has never been needed
    here. *exclude* is only read, never mutated.
    """
    copy = {}
    for key, val in x.items():
        if key not in exclude:
            copy[key] = semi_deepcopy(val)
    return copy
d[dict] = semi_deepcopy_dict

def _semi_deepcopy_list(x):
    """Semi-deep-copy every element of list *x* into a new list."""
    return [semi_deepcopy(item) for item in x]
d[list] = _semi_deepcopy_list

def _semi_deepcopy_tuple(x):
    """Semi-deep-copy every element of tuple *x* into a new tuple."""
    return tuple(semi_deepcopy(item) for item in x)
d[tuple] = _semi_deepcopy_tuple

def semi_deepcopy(x):
    """Return a semi-deep copy of *x* (see the comment block above)."""
    copier = _semi_deepcopy_dispatch.get(type(x))
    if copier is not None:
        return copier(x)
    if hasattr(x, '__semi_deepcopy__') and callable(x.__semi_deepcopy__):
        return x.__semi_deepcopy__()
    if isinstance(x, UserDict):
        return x.__class__(semi_deepcopy_dict(x))
    if isinstance(x, UserList):
        return x.__class__(_semi_deepcopy_list(x))
    return x
class Proxy(object):
    """A simple generic Proxy class, forwarding all calls to its subject.

    Wrap any object:

        proxyObj = Proxy(objA)

    and attribute fetches that Proxy itself cannot satisfy are forwarded
    to the subject, so ``proxyObj.var1`` is equivalent to ``objA.var1``.
    Inherit from this class to create a Proxy.

    Note that, with new-style classes, this does *not* work transparently
    for special .__*__() method names, because those are looked up on the
    class, not the individual instance. Delegate the ones you need
    explicitly:

        class Foo(Proxy):
            __str__ = Delegate('__str__')
    """

    def __init__(self, subject):
        """Wrap an object as a Proxy object"""
        self._subject = subject

    def __getattr__(self, name):
        """Retrieve an attribute from the wrapped object; raises
        AttributeError if the subject lacks it too."""
        return getattr(self._subject, name)

    def get(self):
        """Retrieve the entire wrapped object"""
        return self._subject

    def __cmp__(self, other):
        # Python 2 comparison: compare subjects when *other* is of a
        # compatible class, otherwise fall back to the instance dicts.
        if issubclass(other.__class__, self._subject.__class__):
            return cmp(self._subject, other)
        return cmp(self.__dict__, other.__dict__)
class Delegate(object):
    """A Python Descriptor class that delegates attribute fetches
    to an underlying wrapped subject of a Proxy. Typical use:

        class Foo(Proxy):
            __str__ = Delegate('__str__')
    """

    def __init__(self, attribute):
        self.attribute = attribute

    def __get__(self, obj, cls):
        # Instance access: forward to the proxied subject.
        # Class access (obj is None): return the descriptor itself.
        if not isinstance(obj, cls):
            return self
        return getattr(obj._subject, self.attribute)
# Attempt to load a Windows registry module. Preference order:
#   1. the standard-library winreg module,
#   2. the pywin32 win32api/win32con modules,
#   3. neither available (e.g. non-Windows): provide stubs that raise
#      WindowsError.
# can_read_reg records whether registry access is actually available.
can_read_reg = 0
try:
    import winreg

    can_read_reg = 1
    hkey_mod = winreg

    # Re-export under the names the rest of SCons uses.
    RegOpenKeyEx = winreg.OpenKeyEx
    RegEnumKey = winreg.EnumKey
    RegEnumValue = winreg.EnumValue
    RegQueryValueEx = winreg.QueryValueEx
    RegError = winreg.error

except ImportError:
    try:
        import win32api
        import win32con
        can_read_reg = 1
        hkey_mod = win32con

        RegOpenKeyEx = win32api.RegOpenKeyEx
        RegEnumKey = win32api.RegEnumKey
        RegEnumValue = win32api.RegEnumValue
        RegQueryValueEx = win32api.RegQueryValueEx
        RegError = win32api.error

    except ImportError:
        # No registry at all: RegError becomes a private exception class
        # that nothing ever raises, so "except RegError" stays valid.
        class _NoError(Exception):
            pass
        RegError = _NoError

if can_read_reg:
    HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
    HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
    HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
    HKEY_USERS = hkey_mod.HKEY_USERS

    def RegGetValue(root, key):
        """This utility function returns a value in the registry
        without having to open the key first. Only available on
        Windows platforms with a version of Python that can read the
        registry. Returns the same thing as
        SCons.Util.RegQueryValueEx, except you just specify the entire
        path to the value, and don't have to bother opening the key
        first. So:

        Instead of:
          k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                r'SOFTWARE\Microsoft\Windows\CurrentVersion')
          out = SCons.Util.RegQueryValueEx(k,
                'ProgramFilesDir')

        You can write:
          out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
                r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
        """
        # I would use os.path.split here, but it's not a filesystem
        # path...
        p = key.rfind('\\') + 1
        keyp = key[:p-1]          # -1 to omit trailing slash
        val = key[p:]
        k = RegOpenKeyEx(root, keyp)
        return RegQueryValueEx(k,val)
else:
    try:
        e = WindowsError
    except NameError:
        # Make sure we have a definition of WindowsError so we can
        # run platform-independent tests of Windows functionality on
        # platforms other than Windows. (WindowsError is, in fact, an
        # OSError subclass on Windows.)
        class WindowsError(OSError):
            pass
        import builtins
        builtins.WindowsError = WindowsError
    else:
        del e

    HKEY_CLASSES_ROOT = None
    HKEY_LOCAL_MACHINE = None
    HKEY_CURRENT_USER = None
    HKEY_USERS = None

    # Stubs: calling registry functions without a registry always fails.
    def RegGetValue(root, key):
        raise WindowsError

    def RegOpenKeyEx(root, key):
        raise WindowsError
# WhereIs(): locate an executable on a search path. Exactly one of the
# three definitions below is bound, chosen by platform at import time:
#   win32 - honors %PATHEXT% (default .COM;.EXE;.BAT;.CMD)
#   os2   - fixed ['.exe', '.cmd'] extension list
#   posix - no extensions, but requires an execute-permission bit
# Common contract: return the normalized path of the first match that is
# not listed in *reject*, or None when nothing is found.
#
# Fix: the posix variant's permission mask used the Python 2-only octal
# literal 0111, a SyntaxError on Python 3 (which this module's
# `from collections import UserDict, ...` imports target). It is now
# written 0o111 -- the same value on Python 2.6+ and Python 3.
if sys.platform == 'win32':
    def WhereIs(file, path=None, pathext=None, reject=[]):
        """Find *file* on *path* (default os.environ['PATH']), trying
        each extension in *pathext* (default os.environ['PATHEXT'])."""
        if path is None:
            try:
                path = os.environ['PATH']
            except KeyError:
                return None
        if is_String(path):
            path = path.split(os.pathsep)
        if pathext is None:
            try:
                pathext = os.environ['PATHEXT']
            except KeyError:
                pathext = '.COM;.EXE;.BAT;.CMD'
        if is_String(pathext):
            pathext = pathext.split(os.pathsep)
        for ext in pathext:
            if ext.lower() == file[-len(ext):].lower():
                # The file name already carries one of the extensions.
                pathext = ['']
                break
        if not is_List(reject) and not is_Tuple(reject):
            reject = [reject]
        for dir in path:
            f = os.path.join(dir, file)
            for ext in pathext:
                fext = f + ext
                if os.path.isfile(fext):
                    try:
                        reject.index(fext)
                    except ValueError:
                        return os.path.normpath(fext)
                    continue
        return None
elif os.name == 'os2':
    def WhereIs(file, path=None, pathext=None, reject=[]):
        """Find *file* on *path*, trying the fixed OS/2 extensions."""
        if path is None:
            try:
                path = os.environ['PATH']
            except KeyError:
                return None
        if is_String(path):
            path = path.split(os.pathsep)
        if pathext is None:
            pathext = ['.exe', '.cmd']
        for ext in pathext:
            if ext.lower() == file[-len(ext):].lower():
                pathext = ['']
                break
        if not is_List(reject) and not is_Tuple(reject):
            reject = [reject]
        for dir in path:
            f = os.path.join(dir, file)
            for ext in pathext:
                fext = f + ext
                if os.path.isfile(fext):
                    try:
                        reject.index(fext)
                    except ValueError:
                        return os.path.normpath(fext)
                    continue
        return None
else:
    def WhereIs(file, path=None, pathext=None, reject=[]):
        """Find an executable *file* on *path* (default os.environ['PATH']).
        *pathext* is accepted for cross-platform call compatibility but
        unused on posix."""
        import stat
        if path is None:
            try:
                path = os.environ['PATH']
            except KeyError:
                return None
        if is_String(path):
            path = path.split(os.pathsep)
        if not is_List(reject) and not is_Tuple(reject):
            reject = [reject]
        for d in path:
            f = os.path.join(d, file)
            if os.path.isfile(f):
                try:
                    st = os.stat(f)
                except OSError:
                    # os.stat() raises OSError, not IOError if the file
                    # doesn't exist, so in this case we let IOError get
                    # raised so as to not mask possibly serious disk or
                    # network issues.
                    continue
                # Any execute bit (owner, group or other) will do.
                if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
                    try:
                        reject.index(f)
                    except ValueError:
                        return os.path.normpath(f)
                    continue
        return None
def PrependPath(oldpath, newpath, sep = os.pathsep,
                delete_existing=1, canonicalize=None):
    """This prepends newpath elements to the given oldpath. Will only
    add any particular path once (leaving the first one it encounters
    and ignoring the rest, to preserve path order), and will
    os.path.normpath and os.path.normcase all paths to help assure
    this. This can also handle the case where the given old path
    variable is a list instead of a string, in which case a list will
    be returned instead of a string.

    Example:
      Old Path: "/foo/bar:/foo"
      New Path: "/biz/boom:/foo"
      Result:   "/biz/boom:/foo:/foo/bar"

    If delete_existing is 0, then adding a path that exists will
    not move it to the beginning; it will stay where it is in the
    list.

    If canonicalize is not None, it is applied to each element of
    newpath before use.
    """
    orig = oldpath
    is_list = 1
    paths = orig
    # Normalize the old path to a list, remembering whether to join back.
    if not is_List(orig) and not is_Tuple(orig):
        paths = paths.split(sep)
        is_list = 0

    # Normalize the new path to a list as well.
    if is_String(newpath):
        newpaths = newpath.split(sep)
    elif not is_List(newpath) and not is_Tuple(newpath):
        newpaths = [ newpath ]  # might be a Dir
    else:
        newpaths = newpath

    if canonicalize:
        newpaths=list(map(canonicalize, newpaths))

    if not delete_existing:
        # First uniquify the old paths, making sure to
        # preserve the first instance (in Unix/Linux,
        # the first one wins), and remembering them in normpaths.
        # Then insert the new paths at the head of the list
        # if they're not already in the normpaths list.
        result = []
        normpaths = []
        for path in paths:
            if not path:
                continue
            normpath = os.path.normpath(os.path.normcase(path))
            if normpath not in normpaths:
                result.append(path)
                normpaths.append(normpath)
        newpaths.reverse()      # since we're inserting at the head
        for path in newpaths:
            if not path:
                continue
            normpath = os.path.normpath(os.path.normcase(path))
            if normpath not in normpaths:
                result.insert(0, path)
                normpaths.append(normpath)
        paths = result

    else:
        newpaths = newpaths + paths # prepend new paths

        normpaths = []
        paths = []
        # now we add them only if they are unique
        for path in newpaths:
            normpath = os.path.normpath(os.path.normcase(path))
            if path and not normpath in normpaths:
                paths.append(path)
                normpaths.append(normpath)

    if is_list:
        return paths
    else:
        return sep.join(paths)
def AppendPath(oldpath, newpath, sep = os.pathsep,
               delete_existing=1, canonicalize=None):
    """This appends new path elements to the given old path. Will
    only add any particular path once (leaving the last one it
    encounters and ignoring the rest, to preserve path order), and
    will os.path.normpath and os.path.normcase all paths to help
    assure this. This can also handle the case where the given old
    path variable is a list instead of a string, in which case a list
    will be returned instead of a string.

    Example:
      Old Path: "/foo/bar:/foo"
      New Path: "/biz/boom:/foo"
      Result:   "/foo/bar:/biz/boom:/foo"

    If delete_existing is 0, then adding a path that exists
    will not move it to the end; it will stay where it is in the list.

    If canonicalize is not None, it is applied to each element of
    newpath before use.
    """
    orig = oldpath
    is_list = 1
    paths = orig
    # Normalize the old path to a list, remembering whether to join back.
    if not is_List(orig) and not is_Tuple(orig):
        paths = paths.split(sep)
        is_list = 0

    # Normalize the new path to a list as well.
    if is_String(newpath):
        newpaths = newpath.split(sep)
    elif not is_List(newpath) and not is_Tuple(newpath):
        newpaths = [ newpath ]  # might be a Dir
    else:
        newpaths = newpath

    if canonicalize:
        newpaths=list(map(canonicalize, newpaths))

    if not delete_existing:
        # add old paths to result, then
        # add new paths if not already present
        # (I thought about using a dict for normpaths for speed,
        # but it's not clear hashing the strings would be faster
        # than linear searching these typically short lists.)
        result = []
        normpaths = []
        for path in paths:
            if not path:
                continue
            result.append(path)
            normpaths.append(os.path.normpath(os.path.normcase(path)))
        for path in newpaths:
            if not path:
                continue
            normpath = os.path.normpath(os.path.normcase(path))
            if normpath not in normpaths:
                result.append(path)
                normpaths.append(normpath)
        paths = result
    else:
        # start w/ new paths, add old ones if not present,
        # then reverse.
        newpaths = paths + newpaths # append new paths
        newpaths.reverse()

        normpaths = []
        paths = []
        # now we add them only if they are unique
        for path in newpaths:
            normpath = os.path.normpath(os.path.normcase(path))
            if path and not normpath in normpaths:
                paths.append(path)
                normpaths.append(normpath)
        # Undo the reversal so the surviving entries keep original order.
        paths.reverse()

    if is_list:
        return paths
    else:
        return sep.join(paths)
if sys.platform == 'cygwin':
    def get_native_path(path):
        """Transforms an absolute path into a native path for the system.
        In Cygwin, this converts from a Cygwin path to a Windows one."""
        # 'cygpath -w' prints the Windows form; strip the newline(s).
        output = os.popen('cygpath -w ' + path).read()
        return output.replace('\n', '')
else:
    def get_native_path(path):
        """Transforms an absolute path into a native path for the system.
        Non-Cygwin version: the path is already native, leave it alone."""
        return path
# The module-wide DisplayEngine instance used for progress output.
display = DisplayEngine()
def Split(arg):
    """Return *arg* as a list: lists and tuples pass through unchanged,
    strings are split on whitespace, anything else is wrapped in a
    one-element list."""
    if is_List(arg) or is_Tuple(arg):
        return arg
    if is_String(arg):
        return arg.split()
    return [arg]
class CLVar(UserList):
    """A class for command-line construction variables.

    A list that runs Split() over its initial value and over anything
    added to it, so Append()/Prepend() -- as well as plain
    ``env['VAR'] + 'arg1 arg2'`` -- Do the Right Thing whether handed
    a string or a list.
    """
    def __init__(self, seq = []):
        UserList.__init__(self, Split(seq))

    def __add__(self, other):
        # Coerce the right operand through Split() before concatenating.
        return UserList.__add__(self, CLVar(other))

    def __radd__(self, other):
        return UserList.__radd__(self, CLVar(other))

    def __coerce__(self, other):
        # Python 2 only: coerce the other operand to a CLVar.
        return (self, CLVar(other))

    def __str__(self):
        return ' '.join(self.data)
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
    def __init__(self, dict = None):
        # _keys records insertion order; it must exist before the base
        # initializer runs, since that may call our __setitem__.
        self._keys = []
        UserDict.__init__(self, dict)

    def __delitem__(self, key):
        UserDict.__delitem__(self, key)
        self._keys.remove(key)

    def __setitem__(self, key, item):
        UserDict.__setitem__(self, key, item)
        if key not in self._keys:
            self._keys.append(key)

    def clear(self):
        UserDict.clear(self)
        self._keys = []

    def copy(self):
        duplicate = OrderedDict()
        duplicate.update(self)
        return duplicate

    def items(self):
        return list(zip(self._keys, list(self.values())))

    def keys(self):
        return self._keys[:]

    def popitem(self):
        # Pop the most recently added key, LIFO style.
        if not self._keys:
            raise KeyError('dictionary is empty')
        key = self._keys[-1]
        val = self[key]
        del self[key]
        return (key, val)

    def setdefault(self, key, failobj = None):
        UserDict.setdefault(self, key, failobj)
        if key not in self._keys:
            self._keys.append(key)

    def update(self, dict):
        for (key, val) in dict.items():
            self.__setitem__(key, val)

    def values(self):
        return list(map(self.get, self._keys))
class Selector(OrderedDict):
    """A callable ordered dictionary that maps file suffixes to
    dictionary values. We preserve the order in which items are added
    so that get_suffix() calls always return the first suffix added."""
    def __call__(self, env, source, ext=None):
        # Select the value registered for source[0]'s suffix, or for the
        # explicitly supplied *ext*.
        if ext is None:
            try:
                ext = source[0].suffix
            except IndexError:
                ext = ""
        try:
            return self[ext]
        except KeyError:
            # Try to perform Environment substitution on the keys of
            # the dictionary before giving up.
            # NOTE(review): env is assumed to provide a subst() method
            # (the construction-environment interface) -- verify at the
            # call sites.
            s_dict = {}
            for (k,v) in self.items():
                if k is not None:
                    s_k = env.subst(k)
                    if s_k in s_dict:
                        # We only raise an error when variables point
                        # to the same suffix. If one suffix is literal
                        # and a variable suffix contains this literal,
                        # the literal wins and we don't raise an error.
                        raise KeyError(s_dict[s_k][0], k, s_k)
                    s_dict[s_k] = (k,v)
            try:
                return s_dict[ext][1]
            except KeyError:
                try:
                    # A None key acts as the catch-all default entry.
                    return self[None]
                except KeyError:
                    return None
if sys.platform == 'cygwin':
    # On Cygwin, os.path.normcase() lies, so just report back the
    # fact that the underlying Windows OS is case-insensitive.
    def case_sensitive_suffixes(s1, s2):
        return 0
else:
    def case_sensitive_suffixes(s1, s2):
        """Whether the two suffixes still differ after case folding."""
        return os.path.normcase(s1) != os.path.normcase(s2)
def adjustixes(fname, pre, suf, ensure_suffix=False):
    """Attach prefix *pre* and suffix *suf* to *fname* where missing.

    The prefix is applied to the basename only, leaving any directory
    part intact. The suffix is appended only when it is not already
    present AND (ensure_suffix is true OR the name has no real
    extension at all -- an all-digit tail like '.2' does not count).
    """
    if pre:
        dirpart, base = os.path.split(os.path.normpath(fname))
        if not base.startswith(pre):
            fname = os.path.join(dirpart, pre + base)
    # Only append a suffix if the suffix we're going to add isn't already
    # there, and if either we've been asked to ensure the specific suffix
    # is present or there's no suffix on it at all.
    if suf and not fname.endswith(suf) and \
       (ensure_suffix or not splitext(fname)[1]):
        fname = fname + suf
    return fname
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
    """Return a list of the elements in s, but without duplicates.

    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
    unique("abcabc") some permutation of ["a", "b", "c"], and
    unique(([1, 2], [2, 3], [1, 2])) some permutation of
    [[2, 3], [1, 2]].

    For best speed, all sequence elements should be hashable. Then
    unique() will usually work in linear time.

    If not possible, the sequence elements should enjoy a total
    ordering, and if list(s).sort() doesn't raise TypeError it's
    assumed that they do enjoy a total ordering. Then unique() will
    usually work in O(N*log2(N)) time.

    If that's not possible either, the sequence elements must support
    equality-testing. Then unique() will usually work in quadratic
    time.

    Bug fix (Python 3 port regression): when sorted(s) raised TypeError
    -- elements both unhashable and unorderable -- the name `t` was
    never bound, so the unconditional `del t` afterwards raised
    NameError instead of falling through to the brute-force method.
    The stray `del t` (and the matching `del u`) are gone; both were
    only cleanup, not behavior.
    """
    n = len(s)
    if n == 0:
        return []

    # Try using a dict first, as that's the fastest and will usually
    # work. If it doesn't work, it will usually fail quickly, so it
    # usually doesn't cost much to *try* it. It requires that all the
    # sequence elements be hashable, and support equality comparison.
    u = {}
    try:
        for x in s:
            u[x] = 1
    except TypeError:
        pass                            # move on to the next method
    else:
        return list(u.keys())

    # We can't hash all the elements. Second fastest is to sort,
    # which brings the equal elements together; then duplicates are
    # easy to weed out in a single pass.
    # NOTE: Python's list.sort() was designed to be efficient in the
    # presence of many duplicate elements. This isn't true of all
    # sort functions in all languages or libraries, so this approach
    # is more effective in Python than it may be elsewhere.
    try:
        t = sorted(s)
    except TypeError:
        pass                            # move on to the next method
    else:
        assert n > 0
        last = t[0]
        lasti = i = 1
        while i < n:
            if t[i] != last:
                t[lasti] = last = t[i]
                lasti = lasti + 1
            i = i + 1
        return t[:lasti]

    # Brute force is all that's left.
    u = []
    for x in s:
        if x not in u:
            u.append(x)
    return u
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
    """Return seq's elements in original order, dropping duplicates.

    idfun maps each element to the (hashable) identity used for
    duplicate detection; the default is the element itself.
    """
    if idfun is None:
        def idfun(x): return x
    seen = {}
    result = []
    for item in seq:
        marker = idfun(item)
        if marker not in seen:
            seen[marker] = 1
            result.append(item)
    return result
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
    """Return seq's elements in original order, dropping duplicates;
    all elements must be hashable."""
    seen = {}
    result = []
    for element in seq:
        if element in seen:
            continue
        seen[element] = 1
        result.append(element)
    return result
# Much of the logic here was originally based on recipe 4.9 from the
# Python CookBook, but we had to dumb it way down for Python 1.5.2.
class LogicalLines(object):
    """Wrap a readable file object so that physical lines ending in a
    backslash-newline continuation are joined with the following line(s)
    into a single "logical" line.
    """
    def __init__(self, fileobj):
        self.fileobj = fileobj

    def readline(self):
        """Return the next logical line, or '' at end of file."""
        parts = []
        line = self.fileobj.readline()
        while line:
            if line.endswith('\\\n'):
                # Continuation: drop the backslash-newline and keep reading.
                parts.append(line[:-2])
                line = self.fileobj.readline()
            else:
                parts.append(line)
                break
        return ''.join(parts)

    def readlines(self):
        """Return all remaining logical lines as a list."""
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            line = self.readline()
        return lines
class UniqueList(UserList):
    """A list that lazily removes duplicate (hashable) entries.

    Mutating operations (append, extend, insert, +, *, item/slice
    assignment) only mark the list as possibly containing duplicates;
    the actual order-preserving de-duplication is deferred until the
    contents are next observed (len(), indexing, comparison, count(),
    index(), reverse(), sort()).
    """
    def __init__(self, seq = []):
        # Note: UserList.__init__ copies seq, so the mutable default
        # value is never shared between instances.
        UserList.__init__(self, seq)
        self.unique = True
    def __make_unique(self):
        # De-duplicate self.data in place if any mutation may have
        # introduced duplicates since the last pass.
        if not self.unique:
            self.data = uniquer_hashables(self.data)
            self.unique = True
    def __lt__(self, other):
        self.__make_unique()
        return UserList.__lt__(self, other)
    def __le__(self, other):
        self.__make_unique()
        return UserList.__le__(self, other)
    def __eq__(self, other):
        self.__make_unique()
        return UserList.__eq__(self, other)
    def __ne__(self, other):
        self.__make_unique()
        return UserList.__ne__(self, other)
    def __gt__(self, other):
        self.__make_unique()
        return UserList.__gt__(self, other)
    def __ge__(self, other):
        self.__make_unique()
        return UserList.__ge__(self, other)
    def __cmp__(self, other):
        # Python 2 only; ignored by Python 3.
        self.__make_unique()
        return UserList.__cmp__(self, other)
    def __len__(self):
        self.__make_unique()
        return UserList.__len__(self)
    def __getitem__(self, i):
        self.__make_unique()
        return UserList.__getitem__(self, i)
    def __setitem__(self, i, item):
        UserList.__setitem__(self, i, item)
        self.unique = False
    def __getslice__(self, i, j):
        # Python 2 only; Python 3 routes slicing through __getitem__.
        self.__make_unique()
        return UserList.__getslice__(self, i, j)
    def __setslice__(self, i, j, other):
        # Python 2 only; Python 3 routes slicing through __setitem__.
        UserList.__setslice__(self, i, j, other)
        self.unique = False
    def __add__(self, other):
        result = UserList.__add__(self, other)
        result.unique = False
        return result
    def __radd__(self, other):
        result = UserList.__radd__(self, other)
        result.unique = False
        return result
    def __iadd__(self, other):
        result = UserList.__iadd__(self, other)
        result.unique = False
        return result
    def __mul__(self, other):
        result = UserList.__mul__(self, other)
        result.unique = False
        return result
    def __rmul__(self, other):
        result = UserList.__rmul__(self, other)
        result.unique = False
        return result
    def __imul__(self, other):
        result = UserList.__imul__(self, other)
        result.unique = False
        return result
    def append(self, item):
        UserList.append(self, item)
        self.unique = False
    def insert(self, i, item):
        # BUG FIX: the original signature was insert(self, i), which
        # dropped the item to insert and made every call raise TypeError
        # inside UserList.insert; the item must be passed through.
        UserList.insert(self, i, item)
        self.unique = False
    def count(self, item):
        self.__make_unique()
        return UserList.count(self, item)
    def index(self, item):
        self.__make_unique()
        return UserList.index(self, item)
    def reverse(self):
        self.__make_unique()
        UserList.reverse(self)
    def sort(self, *args, **kwds):
        self.__make_unique()
        return UserList.sort(self, *args, **kwds)
    def extend(self, other):
        UserList.extend(self, other)
        self.unique = False
class Unbuffered(object):
    """
    A proxy class that wraps a file object, flushing after every write,
    and delegating everything else to the wrapped object.
    """
    def __init__(self, file):
        self.file = file
        self.softspace = 0  ## backward compatibility; not supported in Py3k

    def write(self, arg):
        stream = self.file
        try:
            stream.write(arg)
            stream.flush()
        except IOError:
            # Stdout might be connected to a pipe that has been closed
            # by now. The most likely reason for the pipe being closed
            # is that the user has press ctrl-c. It this is the case,
            # then SCons is currently shutdown. We therefore ignore
            # IOError's here so that SCons can continue and shutdown
            # properly so that the .sconsign is correctly written
            # before SCons exits.
            pass

    def __getattr__(self, attr):
        # Everything except write() is handled by the wrapped file.
        return getattr(self.file, attr)
def make_path_relative(path):
    """ makes an absolute path name to a relative pathname.

    Strips the drive (on Windows) and any leading path separators so
    that the result is always relative.
    """
    if os.path.isabs(path):
        drive_s, path = os.path.splitdrive(path)
        if drive_s:
            # Drive-qualified path: drop the single separator after the drive.
            path = path[1:]
        else:
            # POSIX-style: drop every leading '/' (same effect as the old
            # "/*(.*)" regular expression).
            path = path.lstrip('/')
    assert not os.path.isabs(path), path
    return path
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(obj, function, name=None):
    """
    Adds either a bound method to an instance or an unbound method to
    a class. If name is omitted the name of the specified function
    is used by default.
    Example:
      a = A()
      def f(self, x, y):
        self.z = x + y
      AddMethod(A, f, "add")
      a.add(2, 4)
      print a.z
      AddMethod(a, lambda self, i: self.l[i], "listIndex")
      print a.listIndex(5)
    """
    if name is None:
        # Python 2: functions expose their name as func_name.
        name = function.func_name
    else:
        function = RenameFunction(function, name)
    if hasattr(obj, '__class__') and obj.__class__ is not type:
        # "obj" is an instance, so it gets a bound method.
        # (Python 2 three-argument MethodType.)
        setattr(obj, name, MethodType(function, obj, obj.__class__))
    else:
        # "obj" is a class, so it gets an unbound method.
        setattr(obj, name, MethodType(function, None, obj))
def RenameFunction(function, name):
    """
    Returns a function identical to the specified function, but with
    the specified name.
    """
    # Rebuild the function object around the same code, globals and
    # defaults; func_code/func_globals/func_defaults are the Python 2
    # function attributes.
    return FunctionType(function.func_code,
                        function.func_globals,
                        name,
                        function.func_defaults)
# True when the hashlib-based MD5 implementations below are installed;
# False means only the no-op fallbacks are available.
md5 = False

def MD5signature(s):
    # Fallback when hashlib/md5 is unavailable: the "signature" is just
    # the string form of the value itself.
    return str(s)

def MD5filesignature(fname, chunksize=65536):
    # Fallback: use the file's raw contents as its signature.
    # NOTE(review): chunksize is accepted but ignored here, so the whole
    # file is read into memory at once.
    f = open(fname, "rb")
    result = f.read()
    f.close()
    return result

try:
    import hashlib
except ImportError:
    pass
else:
    if hasattr(hashlib, 'md5'):
        # Real MD5 is available: replace the fallbacks above.
        md5 = True
        def MD5signature(s):
            # Hexadecimal MD5 digest of str(s).
            # NOTE(review): str() assumes Python 2 byte strings; under
            # Python 3, hashlib requires bytes.
            m = hashlib.md5()
            m.update(str(s))
            return m.hexdigest()
        def MD5filesignature(fname, chunksize=65536):
            # Hexadecimal MD5 digest of the file contents, read in
            # chunksize-byte blocks to bound memory use.
            m = hashlib.md5()
            f = open(fname, "rb")
            while True:
                blck = f.read(chunksize)
                if not blck:
                    break
                m.update(str(blck))
            f.close()
            return m.hexdigest()
def MD5collect(signatures):
    """Collect a list of signatures into one aggregate signature.

    A single-element list is returned as-is; otherwise the signatures
    are joined with ', ' and re-signed via MD5signature().
    """
    if len(signatures) == 1:
        return signatures[0]
    return MD5signature(', '.join(signatures))
def silent_intern(x):
    """Intern x with sys.intern() when possible.

    Ineligible values (anything sys.intern() rejects with TypeError)
    are returned unchanged instead of raising.
    """
    try:
        result = sys.intern(x)
    except TypeError:
        result = x
    return result
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
#TODO??? class Null(object):
class Null(object):
    """ Null objects always and reliably "do nothing."

    Each class gets exactly one shared instance; attribute access,
    calls and attribute assignment all collapse back to that instance.
    """
    def __new__(cls, *args, **kwargs):
        # One singleton per class: check the class's own dict (not the
        # inherited one) so each subclass gets its own instance.
        if '_instance' not in cls.__dict__:
            cls._instance = super(Null, cls).__new__(cls, *args, **kwargs)
        return cls._instance
    def __init__(self, *args, **kwargs):
        pass
    def __call__(self, *args, **kwargs):
        return self
    def __repr__(self):
        return "Null(0x%08X)" % id(self)
    def __nonzero__(self):
        # Python 2 truth-value hook: a Null is always false.
        return False
    def __getattr__(self, name):
        return self
    def __setattr__(self, name, value):
        # Silently ignore attribute assignment.
        return self
    def __delattr__(self, name):
        # Silently ignore attribute deletion.
        return self
class NullSeq(Null):
    """A Null that additionally supports the sequence protocol: it is
    always empty, iterates over nothing, and silently ignores item
    assignment and deletion."""
    def __len__(self):
        return 0
    def __iter__(self):
        return iter(())
    def __getitem__(self, i):
        # Indexing yields the null object itself.
        return self
    def __delitem__(self, i):
        return self
    def __setitem__(self, i, v):
        return self
# Drop the module-scope revision string now that it has served its purpose.
del __revision__
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# | Python  (stray table artifact from file concatenation, commented out)
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/PathList.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """SCons.PathList
A module for handling lists of directory paths (the sort of things
that get set as CPPPATH, LIBPATH, etc.) with as much caching of data and
efficiency as we can while still keeping the evaluation delayed so that we
Do the Right Thing (almost) regardless of how the variable is specified.
"""
import os
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0 # string with no '$' (needs no substitution)
TYPE_STRING_SUBST = 1 # string containing '$' (needs delayed substitution)
TYPE_OBJECT = 2 # other object (converted via node_conv() at subst time)
def node_conv(obj):
    """
    This is the "string conversion" routine that we have our substitutions
    use to return Nodes, not strings.  It relies on the fact that an
    EntryProxy object has a get() method that returns the underlying
    Node that it wraps, which is a bit of architectural dependence
    that we might need to break or modify in the future in response to
    additional requirements.
    """
    try:
        get = obj.get
    except AttributeError:
        pass
    else:
        # Proxy object: unwrap to the underlying Node.
        return get()
    if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence(obj):
        return obj
    return str(obj)
class _PathList(object):
    """
    An actual PathList object.
    """
    def __init__(self, pathlist):
        """
        Canonicalize the input and pre-digest it for fast substitution
        later.

        The stored representation is a tuple of (type, value) pairs,
        where type is one of the TYPE_* constants defined above:

        TYPE_STRING_NO_SUBST -- a string without any '$'; it never needs
            construction-variable substitution (expected to be the common
            case, so skipping subst() for these is a real win).
        TYPE_STRING_SUBST -- a string containing '$'; it must be re-run
            through env.subst() for every target + source combination
            (e.g. '${TARGET.dir}/include').
        TYPE_OBJECT -- anything else (e.g. an EntryProxy that needs a
            method call to return a Node), converted via node_conv().

        Classifying each element once, up front, avoids re-inspecting the
        list every time subst_path() runs.
        """
        if SCons.Util.is_String(pathlist):
            pathlist = pathlist.split(os.pathsep)
        elif not SCons.Util.is_Sequence(pathlist):
            pathlist = [pathlist]
        digested = []
        for element in pathlist:
            try:
                dollar_at = element.find('$')
            except (AttributeError, TypeError):
                # Not a string at all: treat as an opaque object.
                kind = TYPE_OBJECT
            else:
                if dollar_at == -1:
                    kind = TYPE_STRING_NO_SUBST
                else:
                    kind = TYPE_STRING_SUBST
            digested.append((kind, element))
        self.pathlist = tuple(digested)

    def __len__(self):
        return len(self.pathlist)

    def __getitem__(self, i):
        return self.pathlist[i]

    def subst_path(self, env, target, source):
        """
        Performs construction variable substitution on a pre-digested
        PathList for a specific target and source.
        """
        result = []
        for kind, value in self.pathlist:
            if kind == TYPE_STRING_SUBST:
                value = env.subst(value, target=target, source=source,
                                  conv=node_conv)
                if SCons.Util.is_Sequence(value):
                    # Substitution expanded into several entries; splice
                    # them all in and move on.
                    result.extend(value)
                    continue
            elif kind == TYPE_OBJECT:
                value = node_conv(value)
            if value:
                result.append(value)
        return tuple(result)
class PathListCache(object):
    """
    A class to handle caching of PathList lookups.
    This class gets instantiated once and then deleted from the namespace,
    so it's used as a Singleton (although we don't enforce that in the
    usual Pythonic ways).  We could have just made the cache a dictionary
    in the module namespace, but putting it in this class allows us to
    use the same Memoizer pattern that we use elsewhere to count cache
    hits and misses, which is very valuable.
    Lookup keys in the cache are computed by the _PathList_key() method.
    Cache lookup should be quick, so we don't spend cycles canonicalizing
    all forms of the same lookup key.  For example, 'x:y' and ['x',
    'y'] logically represent the same list, but we don't bother to
    split string representations and treat those two equivalently.
    (Note, however, that we do, treat lists and tuples the same.)
    The main type of duplication we're trying to catch will come from
    looking up the same path list from two different clones of the
    same construction environment.  That is, given
        env2 = env1.Clone()
    both env1 and env2 will have the same CPPPATH value, and we can
    cheaply avoid re-parsing both values of CPPPATH by using the
    common value from this cache.
    """
    # Opt in to SCons' cache hit/miss accounting only when the user
    # asked for memoization statistics (class-body conditional runs at
    # class-creation time).
    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    memoizer_counters = []

    def __init__(self):
        # _memo maps memoization category name -> {key: cached value}.
        self._memo = {}

    def _PathList_key(self, pathlist):
        """
        Returns the key for memoization of PathLists.
        Note that we want this to be pretty quick, so we don't completely
        canonicalize all forms of the same list.  For example,
        'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
        represent the same list if you're executing from $ROOT, but
        we're not going to bother splitting strings into path elements,
        or massaging strings into Nodes, to identify that equivalence.
        We just want to eliminate obvious redundancy from the normal
        case of re-using exactly the same cloned value for a path.
        """
        # Flatten + tuple so that equal lists/tuples hash to the same key.
        if SCons.Util.is_Sequence(pathlist):
            pathlist = tuple(SCons.Util.flatten(pathlist))
        return pathlist

    # Register the key function with the memoizer's hit/miss counters.
    memoizer_counters.append(SCons.Memoize.CountDict('PathList', _PathList_key))

    def PathList(self, pathlist):
        """
        Returns the cached _PathList object for the specified pathlist,
        creating and caching a new object as necessary.
        """
        pathlist = self._PathList_key(pathlist)
        try:
            memo_dict = self._memo['PathList']
        except KeyError:
            memo_dict = {}
            self._memo['PathList'] = memo_dict
        else:
            try:
                return memo_dict[pathlist]
            except KeyError:
                pass
        result = _PathList(pathlist)
        memo_dict[pathlist] = result
        return result
# Export the singleton's bound PathList() method as the module-level API,
# then delete the class so no second cache can be created.
PathList = PathListCache().PathList
del PathListCache
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# | Python  (stray table artifact from file concatenation, commented out)
"""SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# The current build type ('clean' or 'help', or None for a normal build),
# recorded via SetBuildType() below.
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
    """Record the current build type; see the build_types list above.

    NOTE(review): the parameter name 'type' shadows the builtin; kept
    for interface compatibility.
    """
    global build_type
    build_type = type
# to be set, if we are in dry-run mode
dryrun = 0

# Cache modes for configure-test results (selected via SetCacheMode below):
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
    """Set the Configure cache mode.  mode must be one of "auto", "force",
    or "cache"."""
    global cache_mode
    if mode not in ("auto", "force", "cache"):
        raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
    if mode == "auto":
        cache_mode = AUTO
    elif mode == "force":
        cache_mode = FORCE
    else:
        cache_mode = CACHE
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
    """Set the progress display to use (called from SCons.Script)"""
    global progress_display
    progress_display = display
SConfFS = None # shared Node.FS object; created lazily in SConfBase.__init__
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
    """Called just before the building targets phase begins.

    Installs a builder on env that writes each registered config.h file
    from the contents collected during the configure phase.
    """
    if not _ac_config_hs:
        return
    header_action = SCons.Action.Action(_createConfigH, _stringConfigH)
    env.Append(BUILDERS={'SConfigHBuilder':
                         SCons.Builder.Builder(action=header_action)})
    for header, contents in _ac_config_hs.items():
        env.SConfigHBuilder(header, env.Value(contents))
class SConfWarning(SCons.Warnings.Warning):
    """Warning category for anything reported via SCons.Warnings while a
    Configure context is running."""
    pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
    """Base class for all errors raised during a Configure run."""
    def __init__(self,msg):
        SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
    """Raised when a file or directory needs to be updated during a Configure
    process, but the user requested a dry-run"""
    def __init__(self, target):
        if isinstance(target, SCons.Node.FS.File):
            msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
        else:
            msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
        SConfError.__init__(self, msg)
class ConfigureCacheError(SConfError):
    """Raised when the user explicitly requested the cache feature
    (cache_mode == CACHE), but a test has never been built before."""
    def __init__(self,target):
        SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
    """
    Special build info for targets of configure tests. Additional members
    are result (did the builder succeed last time?) and string, which
    contains messages of the original build phase.
    """
    result = None # -> 0/None -> no error, != 0 error
    string = None # the stdout / stderr output when building the target

    def set_build_result(self, result, string):
        # Record the build outcome and the captured output so a later
        # run can replay them from the .sconsign cache.
        self.result = result
        self.string = string
class Streamer(object):
    """
    'Sniffer' for a file-like writable object. Similar to the unix tool tee.
    """
    def __init__(self, orig):
        # orig may be None, in which case output is only captured.
        self.orig = orig
        self.s = io.StringIO()

    def write(self, str):
        # NOTE(review): parameter 'str' shadows the builtin, and
        # unicode() is a Python 2 builtin -- this method assumes Python 2.
        if self.orig:
            self.orig.write(unicode(str))
        self.s.write(unicode(str))

    def writelines(self, lines):
        # Write each line with a trailing newline appended.
        for l in lines:
            self.write(unicode(l + '\n'))

    def getvalue(self):
        """
        Return everything written to orig since the Streamer was created.
        """
        return self.s.getvalue()

    def flush(self):
        if self.orig:
            self.orig.flush()
        self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
    """
    This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
    correctly and knows about the current cache_mode.
    """
    def display(self, message):
        # Write a progress line to the configure log (if one is open).
        if sconf_global.logstream:
            sconf_global.logstream.write("scons: Configure: " + message + "\n")

    def display_cached_string(self, bi):
        """
        Logs the original builder messages, given the SConfBuildInfo instance
        bi.
        """
        if not isinstance(bi, SConfBuildInfo):
            SCons.Warnings.warn(SConfWarning,
                "The stored build information has an unexpected class: %s" % bi.__class__)
        else:
            self.display("The original builder output was:\n" +
                         (" |" + str(bi.string)).replace("\n", "\n |"))

    def failed(self):
        """Handle a failed configure-test build; SConfErrors are re-raised,
        plain BuildErrors are swallowed (a failing test is a normal result)."""
        # check, if the reason was a ConfigureDryRunError or a
        # ConfigureCacheError and if yes, reraise the exception
        exc_type = self.exc_info()[0]
        if issubclass(exc_type, SConfError):
            raise
        elif issubclass(exc_type, SCons.Errors.BuildError):
            # we ignore Build Errors (occurs, when a test doesn't pass)
            # Clear the exception to prevent the contained traceback
            # to build a reference cycle.
            self.exc_clear()
        else:
            self.display('Caught exception while building "%s":\n' %
                         self.targets[0])
            try:
                excepthook = sys.excepthook
            except AttributeError:
                # Earlier versions of Python don't have sys.excepthook...
                def excepthook(type, value, tb):
                    traceback.print_tb(tb)
                    print type, value
            excepthook(*self.exc_info())
        return SCons.Taskmaster.Task.failed(self)

    def collect_node_states(self):
        # returns (is_up_to_date, cached_error, cachable)
        # where is_up_to_date is 1, if the node(s) are up_to_date
        # cached_error is 1, if the node(s) are up_to_date, but the
        # build will fail
        # cachable is 0, if some nodes are not in our cache
        # T is a local debug-trace switch.
        T = 0
        changed = False
        cached_error = False
        cachable = True
        for t in self.targets:
            if T: Trace('%s' % (t))
            bi = t.get_stored_info().binfo
            if isinstance(bi, SConfBuildInfo):
                if T: Trace(': SConfBuildInfo')
                if cache_mode == CACHE:
                    t.set_state(SCons.Node.up_to_date)
                    if T: Trace(': set_state(up_to-date)')
                else:
                    if T: Trace(': get_state() %s' % t.get_state())
                    if T: Trace(': changed() %s' % t.changed())
                    if (t.get_state() != SCons.Node.up_to_date and t.changed()):
                        changed = True
                    if T: Trace(': changed %s' % changed)
                    cached_error = cached_error or bi.result
            else:
                if T: Trace(': else')
                # the node hasn't been built in a SConf context or doesn't
                # exist
                cachable = False
                changed = ( t.get_state() != SCons.Node.up_to_date )
                if T: Trace(': changed %s' % changed)
        if T: Trace('\n')
        return (not changed, cached_error, cachable)

    def execute(self):
        # Build the target of a configure test, honoring cache_mode and
        # recording the outcome (plus captured output) as SConfBuildInfo
        # in the .sconsign file.
        if not self.targets[0].has_builder():
            return
        sconf = sconf_global
        is_up_to_date, cached_error, cachable = self.collect_node_states()
        if cache_mode == CACHE and not cachable:
            raise ConfigureCacheError(self.targets[0])
        elif cache_mode == FORCE:
            is_up_to_date = 0
        if cached_error and is_up_to_date:
            self.display("Building \"%s\" failed in a previous run and all "
                         "its sources are up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
            raise SCons.Errors.BuildError # will be 'caught' in self.failed
        elif is_up_to_date:
            self.display("\"%s\" is up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
        elif dryrun:
            raise ConfigureDryRunError(self.targets[0])
        else:
            # note stdout and stderr are the same here
            s = sys.stdout = sys.stderr = Streamer(sys.stdout)
            try:
                env = self.targets[0].get_build_env()
                if cache_mode == FORCE:
                    # Set up the Decider() to force rebuilds by saying
                    # that every source has changed. Note that we still
                    # call the environment's underlying source decider so
                    # that the correct .sconsign info will get calculated
                    # and keep the build state consistent.
                    def force_build(dependency, target, prev_ni,
                                    env_decider=env.decide_source):
                        env_decider(dependency, target, prev_ni)
                        return True
                    if env.decide_source.func_code is not force_build.func_code:
                        env.Decider(force_build)
                env['PSTDOUT'] = env['PSTDERR'] = s
                try:
                    sconf.cached = 0
                    self.targets[0].build()
                finally:
                    sys.stdout = sys.stderr = env['PSTDOUT'] = \
                                 env['PSTDERR'] = sconf.logstream
            except KeyboardInterrupt:
                raise
            except SystemExit:
                exc_value = sys.exc_info()[1]
                raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
            except Exception, e:
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(1, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    # t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
                raise e
            else:
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(0, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    # t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
class SConfBase(object):
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After finished with your
tests, be sure to call the Finish() method, which returns the modified
environment.
Some words about caching: In most cases, it is not necessary to cache
Test results explicitely. Instead, we use the scons dependency checking
mechanism. For example, if one wants to compile a test program
(SConf.TryLink), the compiler is only called, if the program dependencies
have changed. However, if the program could not be compiled in a former
SConf run, we need to explicitely cache this error.
"""
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise SCons.Errors.UserError
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
def Define(self, name, value = None, comment = None):
"""
Define a pre processor symbol name, with the optional given value in the
current config header.
If value is None (default), then #define name is written. If value is not
none, then #define name value is written.
comment is a string which will be put as a C comment in the
header, to explain the meaning of the value (appropriate C comments /* and
*/ will be put automatically."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + '\n'.join(lines)
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
    """Low level TryBuild implementation. Normally you don't need to
    call that - you can use TryCompile / TryLink / TryRun instead

    Builds a conftest_<N> target with the given builder; on success the
    built node is stored in self.lastTarget, otherwise lastTarget is
    None.  Returns the result of BuildNodes (1 ok / 0 failed).
    Raises SCons.Errors.UserError when PSPAWN or SPAWN is missing from
    the construction environment.
    """
    global _ac_build_counter

    # Make sure we have a PSPAWN value, and save the current
    # SPAWN value.
    try:
        self.pspawn = self.env['PSPAWN']
    except KeyError:
        raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
    try:
        save_spawn = self.env['SPAWN']
    except KeyError:
        raise SCons.Errors.UserError('Missing SPAWN construction variable.')

    nodesToBeBuilt = []

    # unique per-test file name: conftest_0, conftest_1, ...
    f = "conftest_" + str(_ac_build_counter)
    pref = self.env.subst( builder.builder.prefix )
    suff = self.env.subst( builder.builder.suffix )
    target = self.confdir.File(pref + f + suff)

    try:
        # Slide our wrapper into the construction environment as
        # the SPAWN function.
        self.env['SPAWN'] = self.pspawn_wrapper
        sourcetext = self.env.Value(text)

        if text is not None:
            # write the test program text into a conftest source file
            textFile = self.confdir.File(f + extension)
            textFileNode = self.env.SConfSourceBuilder(target=textFile,
                                                       source=sourcetext)
            nodesToBeBuilt.extend(textFileNode)
            source = textFileNode
        else:
            source = None

        nodes = builder(target = target, source = source)
        if not SCons.Util.is_List(nodes):
            nodes = [nodes]
        nodesToBeBuilt.extend(nodes)
        result = self.BuildNodes(nodesToBeBuilt)
    finally:
        # always restore the caller's SPAWN
        self.env['SPAWN'] = save_spawn

    _ac_build_counter = _ac_build_counter + 1
    if result:
        self.lastTarget = nodes[0]
    else:
        self.lastTarget = None

    return result
def TryAction(self, action, text = None, extension = ""):
    """Tries to execute the given action with optional source file
    contents <text> and optional source file extension <extension>.
    Returns the status (0 : failed, 1 : ok) and the contents of the
    output file.
    """
    # Install a throw-away builder for the action, run the test build,
    # then remove the builder again.
    bld = SCons.Builder.Builder(action=action)
    self.env.Append(BUILDERS = {'SConfActionBuilder' : bld})
    ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
    del self.env['BUILDERS']['SConfActionBuilder']
    if not ok:
        return (0, "")
    return (1, self.lastTarget.get_contents())
def TryCompile( self, text, extension):
    """Compiles the program given in text to an env.Object, using extension
    as file extension (e.g. '.c'). Returns 1, if compilation was
    successful, 0 otherwise. The target is saved in self.lastTarget (for
    further processing).
    """
    # simple delegation to the low-level build with the Object builder
    return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
    """Compiles the program given in text to an executable env.Program,
    using extension as file extension (e.g. '.c'). Returns 1, if
    compilation was successful, 0 otherwise. The target is saved in
    self.lastTarget (for further processing).
    """
    # simple delegation to the low-level build with the Program builder
    return self.TryBuild(self.env.Program, text, extension)
def TryRun(self, text, extension ):
    """Compiles and runs the program given in text, using extension
    as file extension (e.g. '.c'). Returns (1, outputStr) on success,
    (0, '') otherwise. The target (a file containing the program's stdout)
    is saved in self.lastTarget (for further processing).
    """
    if not self.TryLink(text, extension):
        return (0, "")
    prog = self.lastTarget
    prog_path = prog.path
    # run the freshly linked program and capture its stdout in a .out file
    out_file = self.confdir.File(os.path.basename(prog_path) + '.out')
    run_nodes = self.env.Command(out_file, prog, [[prog_path, ">", "${TARGET}"]])
    if self.BuildNodes(run_nodes):
        return (1, out_file.get_contents())
    return (0, "")
class TestWrapper(object):
    """Callable proxy around a single configure test (to ensure sanity).

    Verifies the owning SConf instance is still active, builds a fresh
    CheckContext for the test, accumulates any config.h text the test
    produced, and guarantees a result line is printed even if the test
    forgot to call Result().
    """
    def __init__(self, test, sconf):
        self.test = test
        self.sconf = sconf
    def __call__(self, *args, **kw):
        owner = self.sconf
        if not owner.active:
            raise SCons.Errors.UserError
        ctx = CheckContext(owner)
        res = self.test(ctx, *args, **kw)
        if owner.config_h is not None:
            owner.config_h_text = owner.config_h_text + ctx.config_h
        # fallback result line; no-op when the test already reported
        ctx.Result("error: no result")
        return res
def AddTest(self, test_name, test_instance):
    """Adds test_class to this SConf instance. It can be called with
    self.test_name(...)"""
    # bind the test through a sanity-checking wrapper
    wrapper = SConfBase.TestWrapper(test_instance, self)
    setattr(self, test_name, wrapper)
def AddTests(self, tests):
    """Adds all the tests given in the tests dictionary to this SConf
    instance.
    """
    for name, instance in tests.items():
        self.AddTest(name, instance)
def _createDir( self, node ):
    """Ensure the configure directory for *node* exists.

    In dry-run mode a missing directory is an error (nothing may be
    created); otherwise it is created on demand and the node is marked
    as existing.
    """
    dirName = str(node)
    if dryrun:
        if not os.path.isdir(dirName):
            raise ConfigureDryRunError(dirName)
        return
    if not os.path.isdir(dirName):
        os.makedirs(dirName)
        node._exists = 1
def _startup(self):
    """Private method. Set up logstream, and set the environment
    variables necessary for a piped build
    """
    global _ac_config_logs
    global sconf_global
    global SConfFS

    # swap in the SConf file system; restored again in _shutdown()
    self.lastEnvFs = self.env.fs
    self.env.fs = SConfFS
    self._createDir(self.confdir)
    self.confdir.up().add_ignore( [self.confdir] )

    if self.logfile is not None and not dryrun:
        # truncate logfile, if SConf.Configure is called for the first time
        # in a build
        if self.logfile in _ac_config_logs:
            log_mode = "a"
        else:
            _ac_config_logs[self.logfile] = None
            log_mode = "w"
        fp = open(str(self.logfile), log_mode)
        self.logstream = SCons.Util.Unbuffered(fp)
        # logfile may stay in a build directory, so we tell
        # the build system not to override it with a eventually
        # existing file with the same name in the source directory
        self.logfile.dir.add_ignore( [self.logfile] )

        # record who called Configure(); skip our own stack frames
        tb = traceback.extract_stack()[-3-self.depth]
        old_fs_dir = SConfFS.getcwd()
        SConfFS.chdir(SConfFS.Top, change_os_dir=0)
        self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
                             (tb[0], tb[1], str(self.confdir)) )
        SConfFS.chdir(old_fs_dir)
    else:
        self.logstream = None
    # we use a special builder to create source files from TEXT
    action = SCons.Action.Action(_createSource,
                                 _stringSource)
    sconfSrcBld = SCons.Builder.Builder(action=action)
    self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
    self.config_h_text = _ac_config_hs.get(self.config_h, "")
    self.active = 1
    # only one SConf instance should be active at a time ...
    sconf_global = self
def _shutdown(self):
    """Private method. Reset to non-piped spawn.

    Closes the log stream, removes the SConfSourceBuilder from the
    environment, records the accumulated config.h text, and restores
    the environment's original file system object.

    Raises SCons.Errors.UserError when called on an instance that is
    no longer active (Finish() may be called only once).
    """
    global sconf_global, _ac_config_hs

    if not self.active:
        raise SCons.Errors.UserError("Finish may be called only once!")
    if self.logstream is not None and not dryrun:
        self.logstream.write("\n")
        self.logstream.close()
        self.logstream = None
    # remove the SConfSourceBuilder from the environment
    blds = self.env['BUILDERS']
    del blds['SConfSourceBuilder']
    self.env.Replace( BUILDERS=blds )
    self.active = 0
    sconf_global = None
    # idiomatic identity test (was the non-idiomatic "not ... is None")
    if self.config_h is not None:
        _ac_config_hs[self.config_h] = self.config_h_text
    self.env.fs = self.lastEnvFs
class CheckContext(object):
    """Provides a context for configure tests. Defines how a test writes to the
    screen and log file.

    A typical test is just a callable with an instance of CheckContext as
    first argument:

        def CheckCustom(context, ...):
            context.Message('Checking my weird test ... ')
            ret = myWeirdTestFunction(...)
            context.Result(ret)

    Often, myWeirdTestFunction will be one of
    context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, for they are only rebuild, if the dependencies have
    changed.
    """

    def __init__(self, sconf):
        """Constructor. Pass the corresponding SConf instance."""
        self.sconf = sconf
        self.did_show_result = 0

        # for Conftest.py:
        self.vardict = {}
        self.havedict = {}
        self.headerfilename = None
        self.config_h = "" # config_h text will be stored here
        # we don't regenerate the config.h file after each test. That means,
        # that tests won't be able to include the config.h file, and so
        # they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
        # issue, though. If it turns out, that we need to include config.h
        # in tests, we must ensure, that the dependencies are worked out
        # correctly. Note that we can't use Conftest.py's support for config.h,
        # cause we will need to specify a builder for the config.h file ...

    def Message(self, text):
        """Inform about what we are doing right now, e.g.
        'Checking for SOMETHING ... '
        """
        self.Display(text)
        # arm the "(cached)" marker for the next Display() call
        self.sconf.cached = 1
        self.did_show_result = 0

    def Result(self, res):
        """Inform about the result of the test. res may be an integer or a
        string. In case of an integer, the written text will be 'yes' or 'no'.
        The result is only displayed when self.did_show_result is not set.
        """
        if isinstance(res, (int, bool)):
            if res:
                text = "yes"
            else:
                text = "no"
        elif isinstance(res, str):
            text = res
        else:
            raise TypeError("Expected string, int or bool, got " + str(type(res)))

        if self.did_show_result == 0:
            # Didn't show result yet, do it now.
            self.Display(text + "\n")
            self.did_show_result = 1

    # The Try* methods simply delegate to the owning SConf instance.
    def TryBuild(self, *args, **kw):
        return self.sconf.TryBuild(*args, **kw)

    def TryAction(self, *args, **kw):
        return self.sconf.TryAction(*args, **kw)

    def TryCompile(self, *args, **kw):
        return self.sconf.TryCompile(*args, **kw)

    def TryLink(self, *args, **kw):
        return self.sconf.TryLink(*args, **kw)

    def TryRun(self, *args, **kw):
        return self.sconf.TryRun(*args, **kw)

    def __getattr__( self, attr ):
        # 'env' and 'lastTarget' read through to the SConf instance
        if( attr == 'env' ):
            return self.sconf.env
        elif( attr == 'lastTarget' ):
            return self.sconf.lastTarget
        else:
            raise AttributeError("CheckContext instance has no attribute '%s'" % attr)

    #### Stuff used by Conftest.py (look there for explanations).
    # NOTE: these return shell-style status (0 means ok), hence the 'not'.

    def BuildProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Program, text, ext)

    def CompileProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Object, text, ext)

    def CompileSharedObject(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.SharedObject, text, ext)

    def RunProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        st, out = self.TryRun(text, ext)
        return not st, out

    def AppendLIBS(self, lib_name_list):
        # returns the previous LIBS so the caller can restore them later
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Append(LIBS = lib_name_list)
        return oldLIBS

    def PrependLIBS(self, lib_name_list):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Prepend(LIBS = lib_name_list)
        return oldLIBS

    def SetLIBS(self, val):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Replace(LIBS = val)
        return oldLIBS

    def Display(self, msg):
        if self.sconf.cached:
            # We assume that Display is called twice for each test here
            # once for the Checking for ... message and once for the result.
            # The self.sconf.cached flag can only be set between those calls
            msg = "(cached) " + msg
            self.sconf.cached = 0
        progress_display(msg, append_newline=0)
        self.Log("scons: Configure: " + msg + "\n")

    def Log(self, msg):
        if self.sconf.logstream is not None:
            self.sconf.logstream.write(msg)

    #### End of stuff used by Conftest.py.
def SConf(*args, **kw):
    """Factory: return a real SConfBase, or a Null object when the
    current build type disables configuration."""
    if not kw.get(build_type, True):
        return SCons.Util.Null()
    kw['_depth'] = kw.get('_depth', 0) + 1
    # strip the build-type flags before handing kw to SConfBase
    for bt in build_types:
        kw.pop(bt, None)
    return SConfBase(*args, **kw)
def CheckFunc(context, function_name, header = None, language = None):
    """Configure check: is *function_name* available?"""
    err = SCons.Conftest.CheckFunc(context, function_name,
                                   header = header, language = language)
    context.did_show_result = 1
    # Conftest returns an error indicator; invert it for the caller
    return not err
def CheckType(context, type_name, includes = "", language = None):
    """Configure check: is the type *type_name* defined?"""
    err = SCons.Conftest.CheckType(context, type_name,
                                   header = includes, language = language)
    context.did_show_result = 1
    return not err
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
    """Configure check for the size of *type_name*.

    Unlike the other Check* wrappers this returns the Conftest result
    directly (the size; 0 on failure), so there is no inversion.
    """
    size = SCons.Conftest.CheckTypeSize(context, type_name,
                                        header = includes,
                                        language = language,
                                        expect = expect)
    context.did_show_result = 1
    return size
def CheckDeclaration(context, declaration, includes = "", language = None):
    """Configure check: is *declaration* declared by *includes*?"""
    err = SCons.Conftest.CheckDeclaration(context, declaration,
                                          includes = includes,
                                          language = language)
    context.did_show_result = 1
    return not err
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
    """Produce C '#include' lines from a header name or list of names.

    Used by CheckHeader and CheckLibWithHeader.  Returns a tuple
    (include_text, last_header); when leaveLast is true the final header
    is not emitted but handed back separately.
    """
    if not SCons.Util.is_List(headers):
        headers = [headers]
    lastHeader = None
    if leaveLast:
        lastHeader = headers[-1]
        headers = headers[:-1]
    open_q, close_q = include_quotes[0], include_quotes[1]
    lines = ["#include %s%s%s\n" % (open_q, h, close_q) for h in headers]
    return ''.join(lines), lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
    """
    A test for a C or C++ header file.
    """
    # all headers but the last form the #include prelude; the last one
    # is the header actually being probed
    prog_prefix, hdr_to_check = createIncludesFromHeaders(header, 1,
                                                          include_quotes)
    err = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
                                     language = language,
                                     include_quotes = include_quotes)
    context.did_show_result = 1
    return not err
def CheckCC(context):
    """Configure check: does the C compiler work?"""
    err = SCons.Conftest.CheckCC(context)
    context.did_show_result = 1
    return not err
def CheckCXX(context):
    """Configure check: does the C++ compiler work?"""
    err = SCons.Conftest.CheckCXX(context)
    context.did_show_result = 1
    return not err
def CheckSHCC(context):
    """Configure check: does the shared-object C compiler work?"""
    err = SCons.Conftest.CheckSHCC(context)
    context.did_show_result = 1
    return not err
def CheckSHCXX(context):
    """Configure check: does the shared-object C++ compiler work?"""
    err = SCons.Conftest.CheckSHCXX(context)
    context.did_show_result = 1
    return not err
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
    """
    A test for a C header file.
    """
    # delegate to the generic header check with the language forced to C
    return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
    """
    A test for a C++ header file.
    """
    # delegate to the generic header check with the language forced to C++
    return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
             header = None, language = None, autoadd = 1):
    """
    A test for a library. See also CheckLibWithHeader.
    Note that library may also be None to test whether the given symbol
    compiles without flags.
    """
    # normalize 'library' to a non-empty list
    if library == []:
        library = [None]
    if not SCons.Util.is_List(library):
        library = [library]
    # ToDo: accept path for the library
    err = SCons.Conftest.CheckLib(context, library, symbol, header = header,
                                  language = language, autoadd = autoadd)
    context.did_show_result = 1
    return not err
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
                       call = None, autoadd = 1):
    # ToDo: accept path for library. Support system header files.
    """
    Another (more sophisticated) test for a library.
    Checks, if library and header is available for language (may be 'C'
    or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.
    As in CheckLib, we support library=None, to test if the call compiles
    without extra link flags.
    """
    prog_prefix, dummy = createIncludesFromHeaders(header, 0)
    # normalize 'libs' to a non-empty list
    if libs == []:
        libs = [None]
    if not SCons.Util.is_List(libs):
        libs = [libs]
    err = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
                                  call = call, language = language,
                                  autoadd = autoadd)
    context.did_show_result = 1
    return not err
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "src/engine/SCons/Defaults.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
    """
    Return the default construction environment created earlier by
    DefaultEnvironment().  All arguments are accepted for interface
    compatibility and ignored.
    """
    global _default_env
    return _default_env
def DefaultEnvironment(*args, **kw):
    """
    Initial public entry point for creating the default construction
    Environment.

    After creating the environment, we overwrite our name
    (DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
    which more efficiently returns the initialized default construction
    environment without checking for its existence.

    (This function still exists with its _default_check because someone
    else (*cough* Script/__init__.py *cough*) may keep a reference
    to this function.  So we can't use the fully functional idiom of
    having the name originally be a something that *only* creates the
    construction environment and then overwrites the name.)
    """
    global _default_env
    if not _default_env:
        import SCons.Util
        _default_env = SCons.Environment.Environment(*args, **kw)
        # pick the content-based decider when an MD5 implementation is
        # available, otherwise fall back to timestamps
        if SCons.Util.md5:
            _default_env.Decider('MD5')
        else:
            _default_env.Decider('timestamp-match')
        # replace this module-level name with the fast fetcher; later
        # calls skip the creation check entirely
        global DefaultEnvironment
        DefaultEnvironment = _fetch_DefaultEnvironment
        _default_env._CacheDir_path = None
    return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
    """Emitter: mark every target as a non-shared (static) object."""
    for node in target:
        node.attributes.shared = None
    return (target, source)
def SharedObjectEmitter(target, source, env):
    """Emitter: mark every target as a shared object."""
    for node in target:
        node.attributes.shared = 1
    return (target, source)
def SharedFlagChecker(source, target, env):
    """Verify every source feeding a shared target was built shared.

    Skipped entirely when $STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME says
    the platform makes no distinction.
    """
    same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
    if same not in ('0', '', 'False'):
        return
    for src in source:
        shared = getattr(src.attributes, 'shared', None) if hasattr(src, 'attributes') else None
        if not shared:
            raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0]))
# Action that runs SharedFlagChecker over the sources of a shared target.
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)

# Some people were using these variable name before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner

# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go. Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()

# Actions for common languages.  Each pairs the command variable with
# its optional *COMSTR display-string override.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")

# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
    """Return a display string for one node/path or for a list of them."""
    # If dest is a list, we need to manually call str() on each element
    if SCons.Util.is_List(dest):
        quoted = ['"' + str(element) + '"' for element in dest]
        return '[' + ', '.join(quoted) + ']'
    return '"' + str(dest) + '"'
def chmod_func(dest, mode):
    """Apply os.chmod(mode) to every path in *dest*."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        os.chmod(str(entry), mode)

def chmod_strfunc(dest, mode):
    """Display string for the Chmod action (mode shown in octal)."""
    return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)

Chmod = ActionFactory(chmod_func, chmod_strfunc)
def copy_func(dest, src):
    """Copy *src* to *dest*.

    A list of sources is copied file-by-file into the destination
    directory; a single file is copied (with metadata) via
    shutil.copy2; anything else is treated as a directory tree and
    copied with shutil.copytree.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if SCons.Util.is_List(src) and os.path.isdir(dest):
        # loop variable renamed from 'file', which shadows the
        # Python 2 builtin of that name
        for f in src:
            shutil.copy2(f, dest)
        return 0
    elif os.path.isfile(src):
        return shutil.copy2(src, dest)
    else:
        return shutil.copytree(src, dest, 1)

Copy = ActionFactory(copy_func,
                     lambda dest, src: 'Copy("%s", "%s")' % (dest, src),
                     convert=str)
def delete_func(dest, must_exist=0):
    """Remove each file or directory in *dest*.

    Missing entries are silently skipped unless must_exist is set.
    Symlinks to directories are unlinked rather than recursed into.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        path = str(entry)
        # os.path.exists returns False with broken links that exist
        present = os.path.exists(path) or os.path.islink(path)
        if not present and not must_exist:
            continue
        # os.path.isdir returns True when entry is a link to a dir
        if os.path.isdir(path) and not os.path.islink(path):
            shutil.rmtree(path, 1)
            continue
        os.unlink(path)

def delete_strfunc(dest, must_exist=0):
    """Display string for the Delete action."""
    return 'Delete(%s)' % get_paths_str(dest)

Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
    """Create each directory in *dest*, including missing parents.

    A directory that already exists is not an error; any other OS
    failure is re-raised.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        try:
            os.makedirs(str(entry))
        except os.error as e:
            # "except E as e" works on Python 2.6+ and Python 3,
            # unlike the old "except E, e" form used here before.
            p = str(entry)
            if (e.args[0] == errno.EEXIST or
                (sys.platform=='win32' and e.args[0]==183)) \
               and os.path.isdir(str(entry)):
                pass            # not an error if already exists
            else:
                raise

Mkdir = ActionFactory(mkdir_func,
                      lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
    """Move (rename) *src* to *dest* with shutil.move semantics."""
    # both endpoints change, so invalidate memoized node info for both
    SCons.Node.FS.invalidate_node_memos(dest)
    SCons.Node.FS.invalidate_node_memos(src)
    shutil.move(src, dest)

Move = ActionFactory(move_func,
                     lambda dest, src: 'Move("%s", "%s")' % (dest, src),
                     convert=str)
def touch_func(dest):
    """Update access/modification times of each file in *dest*,
    creating any file that does not yet exist (like POSIX touch).
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    # loop variable renamed from 'file' (shadows the Python 2 builtin)
    for name in dest:
        name = str(name)
        mtime = int(time.time())
        if os.path.exists(name):
            atime = os.path.getatime(name)
        else:
            # create the file empty and close the handle explicitly
            # instead of leaking it until garbage collection
            open(name, 'w').close()
            atime = mtime
        os.utime(name, (atime, mtime))

Touch = ActionFactory(touch_func,
                      lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
    """
    Creates a new list from 'list' by first interpolating each element
    in the list using the 'env' dictionary and then calling f on the
    list, and finally calling _concat_ixes to concatenate 'prefix' and
    'suffix' onto each element of the list.
    """
    if not list:
        return list
    substituted = f(SCons.PathList.PathList(list).subst_path(env, target, source))
    if substituted is not None:
        list = substituted
    return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
    """
    Creates a new list from 'list' by concatenating the 'prefix' and
    'suffix' arguments onto each element of the list. A trailing space
    on 'prefix' or leading space on 'suffix' will cause them to be put
    into separate list elements rather than being concatenated.
    """
    result = []

    # ensure that prefix and suffix are strings
    pfx = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
    sfx = str(env.subst(suffix, SCons.Subst.SUBST_RAW))

    for item in list:
        # File nodes pass through untouched
        if isinstance(item, SCons.Node.FS.File):
            result.append(item)
            continue
        item = str(item)
        if not item:
            continue
        if pfx:
            if pfx[-1] == ' ':
                # trailing space: prefix becomes its own list element
                result.append(pfx[:-1])
            elif not item.startswith(pfx):
                item = pfx + item
        result.append(item)
        if sfx:
            if sfx[0] == ' ':
                # leading space: suffix becomes its own list element
                result.append(sfx[1:])
            elif not item.endswith(sfx):
                result[-1] = result[-1] + sfx

    return result
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
    """
    Wrapper around _concat()/_concat_ixes() that first strips known
    prefixes and suffixes from list items before re-affixing new ones.
    Used by tools (like the GNU linker) that need to turn something
    like 'libfoo.a' into '-lfoo'.
    """
    if not itms:
        return itms

    if not callable(c):
        # honour a custom _concat placed into the construction
        # environment (backwards compatibility, see test/custom-concat.py)
        env_c = env['_concat']
        if env_c != _concat and callable(env_c):
            c = env_c
        else:
            c = _concat_ixes

    stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
    stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))

    stripped = []
    for item in SCons.PathList.PathList(itms).subst_path(env, None, None):
        if isinstance(item, SCons.Node.FS.File):
            stripped.append(item)
            continue

        if not SCons.Util.is_String(item):
            item = str(item)

        for sp in stripprefixes:
            if item[:len(sp)] == sp:
                item = item[len(sp):]
                break                 # strip at most one prefix

        for ss in stripsuffixes:
            if item[-len(ss):] == ss:
                item = item[:-len(ss)]
                break                 # strip at most one suffix

        stripped.append(item)

    return c(prefix, stripped, suffix, env)
def processDefines(defs):
    """process defines, resolving strings, lists, dictionaries, into a list of
    strings

    Accepts CPPDEFINES in any supported shape (string, list of strings
    or tuples, dict) and returns a flat list of 'NAME' / 'NAME=VALUE'
    strings.  Raises SCons.Errors.UserError on unsupported elements.
    """
    if SCons.Util.is_List(defs):
        l = []
        for d in defs:
            if d is None:
                continue
            elif SCons.Util.is_List(d) or isinstance(d, tuple):
                if len(d) >= 2:
                    l.append(str(d[0]) + '=' + str(d[1]))
                else:
                    l.append(str(d[0]))
            elif SCons.Util.is_Dict(d):
                # .items() instead of the Python-2-only .iteritems(),
                # consistent with the sorted(defs.items()) call below
                for macro, value in d.items():
                    if value is not None:
                        l.append(str(macro) + '=' + str(value))
                    else:
                        l.append(str(macro))
            elif SCons.Util.is_String(d):
                l.append(str(d))
            else:
                raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
    elif SCons.Util.is_Dict(defs):
        # The items in a dictionary are stored in random order, but
        # if the order of the command-line options changes from
        # invocation to invocation, then the signature of the command
        # line will change and we'll get random unnecessary rebuilds.
        # Consequently, we have to sort the keys to ensure a
        # consistent order...
        l = []
        for k, v in sorted(defs.items()):
            if v is None:
                l.append(str(k))
            else:
                l.append(str(k) + '=' + str(v))
    else:
        l = [str(defs)]
    return l
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
    """Turn CPPDEFINES-style data into C preprocessor command-line
    definitions by normalizing via processDefines and affixing
    prefix/suffix onto each entry.
    """
    normalized = processDefines(defs)
    return c(prefix, env.subst_path(normalized), suffix, env)
class NullCmdGenerator(object):
    """A callable placeholder command generator.

    Calling an instance simply returns the command it was constructed
    with, regardless of target, source, or environment.

    Example usage:
    env["DO_NOTHING"] = NullCmdGenerator
    env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
    """

    def __init__(self, cmd):
        self.cmd = cmd

    def __call__(self, target, source, env, for_signature=None):
        # all arguments are deliberately ignored
        return self.cmd
class Variable_Method_Caller(object):
    """A class for finding a construction variable on the stack and
    calling one of its methods.

    We use this to support "construction variables" in our string
    eval()s that actually stand in for methods--specifically, use
    of "RDirs" in call to _concat that should actually execute the
    "TARGET.RDirs" method. (We used to support this by creating a little
    "build dictionary" that mapped RDirs to the method, but this got in
    the way of Memoizing construction environments, because we had to
    create new environment objects to hold the variables.)
    """
    def __init__(self, variable, method):
        # name of the local variable to locate on the call stack, and
        # the name of the method to invoke on its value
        self.variable = variable
        self.method = method
    def __call__(self, *args, **kw):
        # The raise-and-catch is only a portable way to obtain a
        # traceback whose frame we can walk from.
        try: 1//0
        except ZeroDivisionError:
            # Don't start iterating with the current stack-frame to
            # prevent creating reference cycles (f_back is safe).
            frame = sys.exc_info()[2].tb_frame.f_back
        variable = self.variable
        while frame:
            if variable in frame.f_locals:
                v = frame.f_locals[variable]
                if v:
                    method = getattr(v, self.method)
                    return method(*args, **kw)
            frame = frame.f_back
        # variable not found anywhere up the stack
        return None
# Baseline construction variables copied into every new Environment
# before tools and user overrides are applied.
ConstructionEnvironment = {
    'BUILDERS'      : {},
    'SCANNERS'      : [],
    'CONFIGUREDIR'  : '#/.sconf_temp',
    'CONFIGURELOG'  : '#/config.log',
    'CPPSUFFIXES'   : SCons.Tool.CSuffixes,
    'DSUFFIXES'     : SCons.Tool.DSuffixes,
    'ENV'           : {},
    'IDLSUFFIXES'   : SCons.Tool.IDLSuffixes,
#    'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
    '_concat'       : _concat,
    '_defines'      : _defines,
    '_stripixes'    : _stripixes,
    '_LIBFLAGS'     : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
    '_LIBDIRFLAGS'  : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPINCFLAGS'  : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPDEFFLAGS'  : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
    'TEMPFILE'      : NullCmdGenerator,
    # Dir/Dirs/File/RDirs resolve through the TARGET node at subst time
    'Dir'           : Variable_Method_Caller('TARGET', 'Dir'),
    'Dirs'          : Variable_Method_Caller('TARGET', 'Dirs'),
    'File'          : Variable_Method_Caller('TARGET', 'File'),
    'RDirs'         : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/CacheDir.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """
CacheDir support
"""
import os.path
import stat
import sys
import SCons.Action
# Module-level cache behaviour flags; presumably set from command-line
# options by the calling interface -- TODO confirm against Script code.
cache_enabled = True
cache_debug = False
cache_force = False
cache_show = False
def CacheRetrieveFunc(target, source, env):
    """Try to fetch target[0] from the derived-file cache.

    Returns 0 when the file was retrieved (or would be, under -n),
    1 when it is not present in the cache.
    """
    t = target[0]
    fs = t.fs
    cache = env.get_CacheDir()
    cachedir, cachefile = cache.cachepath(t)
    if not fs.exists(cachefile):
        cache.CacheDebug('CacheRetrieve(%s): %s not in cache\n', t, cachefile)
        return 1
    cache.CacheDebug('CacheRetrieve(%s): retrieving from %s\n', t, cachefile)
    if SCons.Action.execute_actions:
        if fs.islink(cachefile):
            # recreate symlinks rather than copying what they point at
            fs.symlink(fs.readlink(cachefile), t.path)
        else:
            env.copy_from_cache(cachefile, t.path)
        st = fs.stat(cachefile)
        fs.chmod(t.path, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
    return 0
def CacheRetrieveString(target, source, env):
    """Return the message displayed when a target is retrieved from
    the cache, or None when the target is not cached."""
    t = target[0]
    cachedir, cachefile = env.get_CacheDir().cachepath(t)
    if not t.fs.exists(cachefile):
        return None
    return "Retrieved `%s' from cache" % t.path
# Action objects for cache retrieval: one that prints the retrieval
# message via CacheRetrieveString, and a silent variant.
CacheRetrieve = SCons.Action.Action(CacheRetrieveFunc, CacheRetrieveString)
CacheRetrieveSilent = SCons.Action.Action(CacheRetrieveFunc, None)
def CachePushFunc(target, source, env):
    """Push the (just built) first target into the derived-file cache.

    Failure to push is never fatal: it cannot affect the correctness of
    the build, so write errors only produce a warning.
    """
    t = target[0]
    if t.nocache:
        return
    fs = t.fs
    cd = env.get_CacheDir()
    cachedir, cachefile = cd.cachepath(t)
    if fs.exists(cachefile):
        # Don't bother copying it if it's already there.  Note that
        # usually this "shouldn't happen" because if the file already
        # existed in cache, we'd have retrieved the file from there,
        # not built it.  This can happen, though, in a race, if some
        # other person running the same build pushes their copy to
        # the cache after we decide we need to build it but before our
        # build completes.
        cd.CacheDebug('CachePush(%s): %s already exists in cache\n', t, cachefile)
        return

    cd.CacheDebug('CachePush(%s): pushing to %s\n', t, cachefile)

    # Write to a unique temporary name first and rename into place, so a
    # concurrent build never observes a partially written cache entry.
    # (Local renamed from `tempfile` so it no longer shadows the stdlib
    # tempfile module.)
    tmpfile = cachefile + '.tmp' + str(os.getpid())
    errfmt = "Unable to copy %s to cache. Cache file is %s"

    if not fs.isdir(cachedir):
        try:
            fs.makedirs(cachedir)
        except EnvironmentError:
            # We may have received an exception because another process
            # has beaten us creating the directory.
            if not fs.isdir(cachedir):
                msg = errfmt % (str(target), cachefile)
                raise SCons.Errors.EnvironmentError(msg)

    try:
        if fs.islink(t.path):
            fs.symlink(fs.readlink(t.path), tmpfile)
        else:
            fs.copy2(t.path, tmpfile)
        fs.rename(tmpfile, cachefile)
        st = fs.stat(t.path)
        fs.chmod(cachefile, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
    except EnvironmentError:
        # It's possible someone else tried writing the file at the
        # same time we did, or else that there was some problem like
        # the CacheDir being on a separate file system that's full.
        # In any case, inability to push a file to cache doesn't affect
        # the correctness of the build, so just print a warning.
        msg = errfmt % (str(target), cachefile)
        SCons.Warnings.warn(SCons.Warnings.CacheWriteErrorWarning, msg)
# Silent Action wrapper: cache pushes are not echoed to the user.
CachePush = SCons.Action.Action(CachePushFunc, None)
class CacheDir(object):
    """Manages a derived-file cache directory.

    Entry locations are derived from each node's cache signature;
    retrieval and push behavior is controlled by the module-level
    cache_enabled / cache_debug / cache_force / cache_show flags.
    """

    def __init__(self, path):
        """Remember the cache root; disable caching when no hashing
        module is available."""
        try:
            import hashlib
        except ImportError:
            msg = "No hashlib or MD5 module available, CacheDir() not supported"
            SCons.Warnings.warn(SCons.Warnings.NoMD5ModuleWarning, msg)
            self.path = None
        else:
            self.path = path
        self.current_cache_debug = None
        self.debugFP = None

    def CacheDebug(self, fmt, target, cachefile):
        """Emit one cache-debug trace line if debugging is active."""
        if cache_debug != self.current_cache_debug:
            # The debug destination changed: '-' means stdout, any other
            # true value is a file name, a false value disables tracing.
            if cache_debug == '-':
                self.debugFP = sys.stdout
            elif cache_debug:
                self.debugFP = open(cache_debug, 'w')
            else:
                self.debugFP = None
            self.current_cache_debug = cache_debug
        if self.debugFP:
            self.debugFP.write(fmt % (target, os.path.split(cachefile)[1]))

    def is_enabled(self):
        """True when caching is globally enabled and a path was set."""
        return (cache_enabled and self.path is not None)

    def cachepath(self, node):
        """Return the (directory, file) pair where node's cache entry
        lives, or (None, None) when caching is disabled."""
        if not self.is_enabled():
            return None, None
        sig = node.get_cachedir_bsig()
        # Entries are bucketed into subdirectories named after the first
        # (upper-cased) character of the signature.
        bucket = os.path.join(self.path, sig[0].upper())
        return bucket, os.path.join(bucket, sig)

    def retrieve(self, node):
        """Try to satisfy node from the cache; return True on success.

        Called from multiple threads in a parallel build, so only
        thread-safe work may happen here; thread-unsafe work belongs in
        built().

        Note the execute=1 trick (not normally done for other actions):
        under a no-exec (-n) build, SCons.Action.execute_actions is 0
        and Action.__call__ would just show the command and return zero.
        A zero here would be misread as "successfully retrieved from
        cache" even when the file is not cached at all, so nothing would
        be shown or built and the user would see nothing.  The fix is to
        force Action.__call__ to run CacheRetrieveFunc, which itself
        checks SCons.Action.execute_actions before acting.
        """
        if not self.is_enabled():
            return False
        env = node.get_build_env()
        if cache_show:
            if CacheRetrieveSilent(node, [], env, execute=1) == 0:
                node.build(presub=0, execute=0)
                return True
        else:
            if CacheRetrieve(node, [], env, execute=1) == 0:
                return True
        return False

    def push(self, node):
        """Copy node's built file into the cache (no-op when disabled)."""
        if not self.is_enabled():
            return
        return CachePush(node, [], node.get_build_env())

    def push_if_forced(self, node):
        """Push only when --cache-force is in effect."""
        if cache_force:
            return self.push(node)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.SConsign
Writing and reading information to the .sconsign file or files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConsign.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.compat
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import SCons.dblite
import SCons.Warnings
def corrupt_dblite_warning(filename):
    # Installed below as SCons.dblite.corruption_warning: turns a corrupt
    # .sconsign database file into a warning instead of a hard failure.
    SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                        "Ignoring corrupt .sconsign file: %s"%filename)
# Tell the dblite layer to warn (via the hook above) rather than fail
# when it encounters a corrupt database file.
SCons.dblite.ignore_corrupt_dbfiles = 1
SCons.dblite.corruption_warning = corrupt_dblite_warning

#XXX Get rid of the global array so this becomes re-entrant.
sig_files = []

# Info for the database SConsign implementation (now the default):
# "DataBase" is a dictionary that maps top-level SConstruct directories
# to open database handles.
# "DB_Module" is the Python database module to create the handles.
# "DB_Name" is the base name of the database file (minus any
# extension the underlying DB module will add).
DataBase = {}
DB_Module = SCons.dblite
DB_Name = ".sconsign"
DB_sync_list = []   # open writable handles to sync()/close() at exit
def Get_DataBase(dir):
    """Return (db, mode): the open sconsign database covering *dir*.

    The first candidate directory (the SConstruct top) gets a writable
    "c" handle; subsequent repository directories are opened read-only
    ("r") so we never write into someone else's repository.  Handles are
    memoized in DataBase and writable ones queued in DB_sync_list so
    they can be synced at the end of the build.
    """
    global DataBase, DB_Module, DB_Name
    top = dir.fs.Top
    if not os.path.isabs(DB_Name) and top.repositories:
        # Only the first containing directory may be opened writable.
        mode = "c"
        for d in [top] + top.repositories:
            if dir.is_under(d):
                try:
                    return DataBase[d], mode
                except KeyError:
                    path = d.entry_abspath(DB_Name)
                    try: db = DataBase[d] = DB_Module.open(path, mode)
                    except (IOError, OSError): pass
                    else:
                        if mode != "r":
                            DB_sync_list.append(db)
                        return db, mode
            # Everything after the first candidate is a repository:
            # read-only from here on.
            mode = "r"
    try:
        return DataBase[top], "c"
    except KeyError:
        db = DataBase[top] = DB_Module.open(DB_Name, "c")
        DB_sync_list.append(db)
        return db, "c"
    except TypeError:
        # Diagnostic dump (Python 2 print statement) before re-raising:
        # a TypeError here indicates an unusable lookup key.
        print "DataBase =", DataBase
        raise
def Reset():
    """Reset global state. Used by unit tests that end up using
    SConsign multiple times to get a clean slate for each test."""
    global sig_files, DB_sync_list
    sig_files = []
    DB_sync_list = []

normcase = os.path.normcase  # cached: used to normalize sconsign keys
def write():
    """Flush every registered sconsign file, then sync and close all
    database handles queued in DB_sync_list."""
    global sig_files
    for sig_file in sig_files:
        sig_file.write(sync=0)
    missing = object()
    for db in DB_sync_list:
        # Not every dbm module provides sync()/close(); invoke each one
        # only when the handle actually has it.
        flush = getattr(db, 'sync', missing)
        if flush is not missing:
            flush()
        finish = getattr(db, 'close', missing)
        if finish is not missing:
            finish()
class SConsignEntry(object):
    """
    Wrapper class for the generic entry in a .sconsign file.
    The Node subclass populates it with attributes as it pleases.

    XXX As coded below, we do expect a '.binfo' attribute to be added,
    but we'll probably generalize this in the next refactorings.
    """
    current_version_id = 1

    def __init__(self):
        # Create an object attribute from the class attribute so it ends up
        # in the pickled data in the .sconsign file.
        #
        # Bug fix: the original assigned to a bare local
        # (`_version_id = ...`), so the version id never reached the
        # instance — and therefore never reached the pickle either.
        self._version_id = self.current_version_id

    def convert_to_sconsign(self):
        """Prepare the attached build info for pickling to .sconsign."""
        self.binfo.convert_to_sconsign()

    def convert_from_sconsign(self, dir, name):
        """Fix up build info just unpickled for entry *name* in *dir*."""
        self.binfo.convert_from_sconsign(dir, name)
class Base(object):
    """
    Controlling class for the signature entries belonging to a single
    directory.  A storage-specific subclass (per-directory file or
    shared database) maintains the actual directory association; this
    class supplies the common accessors for individual entries.
    """
    def __init__(self):
        self.entries = {}
        self.dirty = False
        self.to_be_merged = {}

    def get_entry(self, filename):
        """Return the stored entry for *filename*."""
        return self.entries[filename]

    def set_entry(self, filename, obj):
        """Record *obj* as the entry for *filename* and mark us dirty."""
        self.entries[filename] = obj
        self.dirty = True

    def do_not_set_entry(self, filename, obj):
        """Read-only stand-in for set_entry (repository directories)."""
        pass

    def store_info(self, filename, node):
        """Queue *node* so its info is merged into *filename*'s entry at
        write time."""
        entry = node.get_stored_info()
        entry.binfo.merge(node.get_binfo())
        self.to_be_merged[filename] = node
        self.dirty = True

    def do_not_store_info(self, filename, node):
        """Read-only stand-in for store_info (repository directories)."""
        pass

    def merge(self):
        """Fold every queued node's info into self.entries, then clear
        the queue."""
        for name, node in self.to_be_merged.items():
            entry = node.get_stored_info()
            try:
                ninfo = entry.ninfo
            except AttributeError:
                # SConf nodes take direct control over the build decision
                # and store their information themselves, so they carry
                # no ninfo to merge.
                pass
            else:
                ninfo.merge(node.get_ninfo())
            self.entries[name] = entry
        self.to_be_merged = {}
class DB(Base):
    """
    A Base subclass that reads and writes signature information
    from a global .sconsign.db* file--the actual file suffix is
    determined by the database module.
    """
    def __init__(self, dir):
        # dir: the directory node whose signature entries this manages.
        Base.__init__(self)

        self.dir = dir

        db, mode = Get_DataBase(dir)

        # Read using the path relative to the top of the Repository
        # (self.dir.tpath) from which we're fetching the signature
        # information.
        path = normcase(dir.tpath)
        try:
            rawentries = db[path]
        except KeyError:
            # No stored entries yet for this directory; start empty.
            pass
        else:
            try:
                self.entries = pickle.loads(rawentries)
                if not isinstance(self.entries, dict):
                    # Unexpected pickle payload: reset and signal
                    # corruption via the handler below.
                    self.entries = {}
                    raise TypeError
            except KeyboardInterrupt:
                raise
            except Exception, e:
                SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                                    "Ignoring corrupt sconsign entry : %s (%s)\n"%(self.dir.tpath, e))
            for key, entry in self.entries.items():
                entry.convert_from_sconsign(dir, key)

        if mode == "r":
            # This directory is actually under a repository, which means
            # likely they're reaching in directly for a dependency on
            # a file there. Don't actually set any entry info, so we
            # won't try to write to that .sconsign.dblite file.
            self.set_entry = self.do_not_set_entry
            self.store_info = self.do_not_store_info

        global sig_files
        sig_files.append(self)

    def write(self, sync=1):
        # Nothing changed: skip serialization entirely.
        if not self.dirty:
            return

        self.merge()

        db, mode = Get_DataBase(self.dir)

        # Write using the path relative to the top of the SConstruct
        # directory (self.dir.path), not relative to the top of
        # the Repository; we only write to our own .sconsign file,
        # not to .sconsign files in Repositories.
        path = normcase(self.dir.path)
        for key, entry in self.entries.items():
            entry.convert_to_sconsign()
        db[path] = pickle.dumps(self.entries, 1)

        if sync:
            try:
                syncmethod = db.sync
            except AttributeError:
                # Not all anydbm modules have sync() methods.
                pass
            else:
                syncmethod()
class Dir(Base):
    def __init__(self, fp=None, dir=None):
        """
        fp - file pointer to read entries from
        dir - if supplied, used to fix up the unpickled entries
        """
        Base.__init__(self)
        if not fp:
            return

        self.entries = pickle.load(fp)
        if not isinstance(self.entries, dict):
            # Anything other than a dict means a corrupt file; reset to
            # empty and signal the caller (DirFile catches this).
            self.entries = {}
            raise TypeError

        if dir:
            for name, entry in self.entries.items():
                entry.convert_from_sconsign(dir, name)
class DirFile(Dir):
    """
    Encapsulates reading and writing a per-directory .sconsign file.
    """
    def __init__(self, dir):
        """
        dir - the directory for the file
        """
        self.dir = dir
        self.sconsign = os.path.join(dir.path, '.sconsign')

        try:
            fp = open(self.sconsign, 'rb')
        except IOError:
            # No existing .sconsign file; start with empty entries.
            fp = None

        try:
            Dir.__init__(self, fp, dir)
        except KeyboardInterrupt:
            raise
        except:
            # Any other failure (bad pickle, wrong payload type) means
            # the file is corrupt; warn and carry on with empty entries.
            SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                                "Ignoring corrupt .sconsign file: %s"%self.sconsign)

        global sig_files
        sig_files.append(self)

    def write(self, sync=1):
        """
        Write the .sconsign file to disk.

        Try to write to a temporary file first, and rename it if we
        succeed. If we can't write to the temporary file, it's
        probably because the directory isn't writable (and if so,
        how did we build anything in this directory, anyway?), so
        try to write directly to the .sconsign file as a backup.
        If we can't rename, try to copy the temporary contents back
        to the .sconsign file. Either way, always try to remove
        the temporary file at the end.
        """
        if not self.dirty:
            return

        self.merge()

        # Unique temp name per process so parallel builds don't collide.
        temp = os.path.join(self.dir.path, '.scons%d' % os.getpid())
        try:
            file = open(temp, 'wb')
            fname = temp
        except IOError:
            try:
                file = open(self.sconsign, 'wb')
                fname = self.sconsign
            except IOError:
                # Nowhere writable at all; silently give up.
                return
        for key, entry in self.entries.items():
            entry.convert_to_sconsign()
        pickle.dump(self.entries, file, 1)
        file.close()
        if fname != self.sconsign:
            try:
                mode = os.stat(self.sconsign)[0]
                os.chmod(self.sconsign, 0666)
                os.unlink(self.sconsign)
            except (IOError, OSError):
                # Try to carry on in the face of either OSError
                # (things like permission issues) or IOError (disk
                # or network issues). If there's a really dangerous
                # issue, it should get re-raised by the calls below.
                pass
            try:
                os.rename(fname, self.sconsign)
            except OSError:
                # An OSError failure to rename may indicate something
                # like the directory has no write permission, but
                # the .sconsign file itself might still be writable,
                # so try writing on top of it directly. An IOError
                # here, or in any of the following calls, would get
                # raised, indicating something like a potentially
                # serious disk or network issue.
                open(self.sconsign, 'wb').write(open(fname, 'rb').read())
                os.chmod(self.sconsign, mode)
        try:
            os.unlink(temp)
        except (IOError, OSError):
            pass
# Default per-directory implementation: the shared database form.
ForDirectory = DB
def File(name, dbm_module=None):
    """
    Arrange for all signatures to be stored in a global .sconsign.db*
    file.

    name - base name of the database file; None switches back to
           per-directory .sconsign files.
    dbm_module - optional database module to use instead of the default.
    """
    global ForDirectory, DB_Name, DB_Module
    if name is None:
        # Revert to one .sconsign file per directory.
        ForDirectory = DirFile
        DB_Module = None
    else:
        ForDirectory = DB
        DB_Name = name
        if dbm_module is not None:
            DB_Module = dbm_module
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""SCons.Errors
This file contains the exception classes used to handle internal
and user errors in SCons.
"""
__revision__ = "src/engine/SCons/Errors.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Util
import exceptions
class BuildError(Exception):
    """An error that occurred while building a target.

    Cause-related attributes:
        errstr     - description of the error
        status     - return code of the action that caused the failure;
                     non-zero even when the error is not due to an
                     action returning non-zero
        exitstatus - exit status SCons should use for this error;
                     non-zero unless produced by an explicit Exit()
                     call.  May differ from status: actions return a
                     code that should be respected, but SCons typically
                     exits with 2 regardless of the failed action's
                     return value.
        filename   - file or directory that caused the error, which may
                     differ from the target being built (for example, a
                     directory that could not be created); None when no
                     file is involved
        exc_info   - exception info behind the error, or
                     (None, None, None) when no exception was involved

    Location-related attributes:
        node       - target node(s) being built when the error occurred
        executor   - failing executor, or None
        action     - failing action, or None
        command    - command line of the failing action, or None
    """
    def __init__(self,
                 node=None, errstr="Unknown error", status=2, exitstatus=2,
                 filename=None, executor=None, action=None, command=None,
                 exc_info=(None, None, None)):
        # Record everything that describes the failure...
        self.errstr = errstr
        self.status = status
        self.exitstatus = exitstatus
        self.filename = filename
        self.exc_info = exc_info
        self.node = node
        self.executor = executor
        self.action = action
        self.command = command
        # ...and hand the same values to Exception so args/str work.
        Exception.__init__(self, node, errstr, status, exitstatus, filename,
                           executor, action, command, exc_info)

    def __str__(self):
        prefix = self.filename + ': ' if self.filename else ''
        return prefix + self.errstr
class InternalError(Exception):
    """An internal inconsistency (a bug in SCons itself)."""
    pass

class UserError(Exception):
    """An error caused by the user's input (converted to exit status 2
    by convert_to_BuildError below)."""
    pass

class StopError(Exception):
    """Raised to stop processing; converted to exit status 2 by
    convert_to_BuildError below."""
    pass

class EnvironmentError(Exception):
    # NOTE: intentionally defines a class with the same name as the
    # builtin EnvironmentError inside this module; elsewhere in the file
    # it is raised as SCons.Errors.EnvironmentError.
    pass

class MSVCError(IOError):
    """An IOError variant raised by the MSVC support code."""
    pass
class ExplicitExit(Exception):
    """Raised for an explicit exit request.

    Carries the node being processed and the requested status, which is
    mirrored into exitstatus for uniformity with BuildError.
    """
    def __init__(self, node=None, status=None, *args):
        self.node = node
        self.status = status
        self.exitstatus = status
        super(ExplicitExit, self).__init__(*args)
def convert_to_BuildError(status, exc_info=None):
    """
    Convert any return code (or Exception) into a BuildError.

    `status' can either be a return code or an Exception.  The
    buildError.status set here will normally be used as the exit status
    of the "scons" process.
    """
    if not exc_info and isinstance(status, Exception):
        exc_info = (status.__class__, status, None)

    # The isinstance chain below is order-sensitive: the more specific
    # SCons exception types must be tested before the generic Exception.
    if isinstance(status, BuildError):
        buildError = status
        buildError.exitstatus = 2   # always exit with 2 on build errors
    elif isinstance(status, ExplicitExit):
        status = status.status
        buildError = BuildError(
            errstr='Explicit exit, status %s' % status,
            status=status,      # might be 0, OK here
            exitstatus=status,  # might be 0, OK here
            exc_info=exc_info)
    elif isinstance(status, (StopError, UserError)):
        buildError = BuildError(
            errstr=str(status),
            status=2,
            exitstatus=2,
            exc_info=exc_info)
    elif isinstance(status, exceptions.EnvironmentError):
        # If an IOError/OSError happens, raise a BuildError.
        # Report the name of the file or directory that caused the
        # error, which might be different from the target being built
        # (for example, failure to create the directory in which the
        # target file will appear).
        buildError = BuildError(
            errstr=status.strerror,
            status=status.errno,
            exitstatus=2,
            filename=getattr(status, 'filename', None),
            exc_info=exc_info)
    elif isinstance(status, Exception):
        buildError = BuildError(
            errstr='%s : %s' % (status.__class__.__name__, status),
            status=2,
            exitstatus=2,
            exc_info=exc_info)
    elif SCons.Util.is_String(status):
        buildError = BuildError(
            errstr=status,
            status=2,
            exitstatus=2)
    else:
        buildError = BuildError(
            errstr="Error %s" % status,
            status=status,
            exitstatus=2)

    return buildError
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Subst
SCons string substitution.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Subst.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import collections
import re
import SCons.Errors
from SCons.Util import is_String, is_Sequence
# Indexed by the SUBST_* constants below.
_strconv = [SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_signature]
AllowableExceptions = (IndexError, NameError)
def SetAllowableExceptions(*excepts):
    """Replace the set of exception classes that are silently expanded
    to the empty string during substitution.  False values are dropped."""
    global AllowableExceptions
    AllowableExceptions = [exc for exc in excepts if exc]
def raise_exception(exception, target, s):
    """Re-raise a substitution failure as a BuildError (when a target is
    known) or a UserError, naming the expression that failed."""
    msg = "%s `%s' trying to evaluate `%s'" % (
        exception.__class__.__name__, exception, s)
    if target:
        raise SCons.Errors.BuildError(target[0], msg)
    else:
        raise SCons.Errors.UserError(msg)
class Literal(object):
    """Wraps a string so it is interpreted as a literal: when handed to
    the command interpreter, all special characters are escaped."""

    def __init__(self, lstr):
        self.lstr = lstr

    def __str__(self):
        return self.lstr

    def escape(self, escape_func):
        return escape_func(self.lstr)

    def for_signature(self):
        return self.lstr

    def is_literal(self):
        return 1
class SpecialAttrWrapper(object):
    """Wrapper for a Node "special attribute" — any Node attribute
    reachable through Environment variable substitution, such as
    $TARGET.abspath or $SOURCES[1].filebase.  Implements the same
    methods as Literal for special-character handling, plus a
    for_signature method that can return a canonical string during
    signature calculation to avoid unnecessary rebuilds."""

    def __init__(self, lstr, for_signature=None):
        """If for_signature is supplied it becomes the canonical string
        returned by for_signature(); otherwise lstr is used."""
        self.lstr = lstr
        self.forsig = for_signature if for_signature else lstr

    def __str__(self):
        return self.lstr

    def escape(self, escape_func):
        return escape_func(self.lstr)

    def for_signature(self):
        return self.forsig

    def is_literal(self):
        return 1
def quote_spaces(arg):
    """Return *arg* as a string, wrapped in double quotes when it
    contains a space or tab."""
    if ' ' in arg or '\t' in arg:
        return '"%s"' % arg
    return str(arg)
class CmdStringHolder(collections.UserString):
    """Holds strings generated by scons_subst() and scons_subst_list().

    Adds escape(): given a platform-specific escape function, return
    the contained string with the proper escape sequences inserted.
    """
    def __init__(self, cmd, literal=None):
        collections.UserString.__init__(self, cmd)
        self.literal = literal

    def is_literal(self):
        return self.literal

    def escape(self, escape_func, quote_func=quote_spaces):
        """Escape the string with the supplied function, which takes an
        arbitrary string and returns it with all special characters
        escaped, ready for the command interpreter.  After this, str()
        of the result is the escaped string.
        """
        if self.is_literal():
            return escape_func(self.data)
        if ' ' in self.data or '\t' in self.data:
            return quote_func(self.data)
        return self.data
def escape_list(mylist, escape_func):
    """Escape a list of arguments: every element with an escape() method
    is replaced by the result of escape(escape_func); everything else
    passes through unchanged."""
    escaped = []
    for item in mylist:
        escaper = getattr(item, 'escape', None)
        escaped.append(item if escaper is None else escaper(escape_func))
    return escaped
class NLWrapper(object):
    """Delays turning a list of sources or targets into a NodeList
    until it is actually needed.  The function supplied at construction
    turns raw nodes into the proxies that implement special attributes
    like .abspath and .source — so those proxies are only created if
    $TARGET and friends are really used.

    In practice this might be a wash performance-wise, but it's a
    little cleaner conceptually...
    """
    def __init__(self, list, func):
        self.list = list
        self.func = func

    def _return_nodelist(self):
        return self.nodelist

    def _gen_nodelist(self):
        items = self.list
        if items is None:
            items = []
        elif not is_Sequence(items):
            items = [items]
        # Mapping self.func over the items is what actually builds the
        # proxy objects.
        self.nodelist = SCons.Util.NodeList([self.func(item) for item in items])
        # Memoize: later calls go straight to the built list.
        self._create_nodelist = self._return_nodelist
        return self.nodelist

    _create_nodelist = _gen_nodelist
class Targets_or_Sources(collections.UserList):
    """Implements $TARGETS/$SOURCES expansion by delegating every access
    to a wrapped NLWrapper, which creates the proxy NodeList on demand.

    We subclass collections.UserList purely so that is_Sequence()
    identifies instances as lists during variable expansion; none of
    the UserList machinery is actually used.
    """
    def __init__(self, nl):
        self.nl = nl

    def __getattr__(self, attr):
        return getattr(self.nl._create_nodelist(), attr)

    def __getitem__(self, i):
        return self.nl._create_nodelist()[i]

    def __getslice__(self, i, j):
        # Python 2 slicing protocol; negative indices are clamped to 0.
        nodes = self.nl._create_nodelist()
        return nodes[max(i, 0):max(j, 0)]

    def __str__(self):
        return str(self.nl._create_nodelist())

    def __repr__(self):
        return repr(self.nl._create_nodelist())
class Target_or_Source(object):
    """Implements $TARGET/$SOURCE expansion by delegating to a wrapped
    NLWrapper and operating on the first proxy node it yields."""

    def __init__(self, nl):
        self.nl = nl

    def __getattr__(self, attr):
        nodes = self.nl._create_nodelist()
        try:
            head = nodes[0]
        except IndexError:
            # An empty list has no node to delegate to, so every
            # attribute lookup must fail.
            raise AttributeError("NodeList has no attribute: %s" % attr)
        return getattr(head, attr)

    def __str__(self):
        nodes = self.nl._create_nodelist()
        return str(nodes[0]) if nodes else ''

    def __repr__(self):
        nodes = self.nl._create_nodelist()
        return repr(nodes[0]) if nodes else ''
class NullNodeList(SCons.Util.NullSeq):
    # Stand-in for TARGET/SOURCE variables when no nodes were supplied:
    # calling or stringifying it yields the empty string.
    def __call__(self, *args, **kwargs): return ''
    def __str__(self): return ''

# Shared singleton used by subst_dict() below.
NullNodesList = NullNodeList()
def subst_dict(target, source):
    """Create a dictionary for substitution of special
    construction variables.

    This translates the following special arguments:

    target - the target (object or array of objects),
             used to generate the TARGET and TARGETS
             construction variables

    source - the source (object or array of objects),
             used to generate the SOURCES and SOURCE
             construction variables
    """
    # Local renamed from `dict` so it no longer shadows the builtin.
    subst = {}

    if target:
        def get_tgt_subst_proxy(thing):
            try:
                subst_proxy = thing.get_subst_proxy()
            except AttributeError:
                subst_proxy = thing # probably a string, just return it
            return subst_proxy
        tnl = NLWrapper(target, get_tgt_subst_proxy)
        subst['TARGETS'] = Targets_or_Sources(tnl)
        subst['TARGET'] = Target_or_Source(tnl)

        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway.  We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        subst['CHANGED_TARGETS'] = '$TARGETS'
        subst['UNCHANGED_TARGETS'] = '$TARGETS'
    else:
        subst['TARGETS'] = NullNodesList
        subst['TARGET'] = NullNodesList

    if source:
        def get_src_subst_proxy(node):
            try:
                rfile = node.rfile
            except AttributeError:
                pass
            else:
                node = rfile()
            try:
                return node.get_subst_proxy()
            except AttributeError:
                return node     # probably a String, just return it
        snl = NLWrapper(source, get_src_subst_proxy)
        subst['SOURCES'] = Targets_or_Sources(snl)
        subst['SOURCE'] = Target_or_Source(snl)

        # Same cheat as above, for the source side: these expand to
        # $SOURCES, which is "good enough" for ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        subst['CHANGED_SOURCES'] = '$SOURCES'
        subst['UNCHANGED_SOURCES'] = '$SOURCES'
    else:
        subst['SOURCES'] = NullNodesList
        subst['SOURCE'] = NullNodesList

    return subst
# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst().  SUBST_RAW gives the raw command line.  SUBST_CMD
# gives a command line suitable for passing to a shell.  SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
SUBST_CMD = 0
SUBST_RAW = 1
SUBST_SIG = 2

# Matches a single '$(' or '$)' marker (strips just the markers).
_rm = re.compile(r'\$[()]')
# Matches an entire '$( ... $)' bracketed section (drops the contents).
_remove = re.compile(r'\$\([^\$]*(\$[^\)][^\$]*)*\$\)')

# Indexed by the SUBST_* constants above.
_regex_remove = [ _rm, None, _remove ]
def _rm_list(list):
#return [ l for l in list if not l in ('$(', '$)') ]
return [l for l in list if not l in ('$(', '$)')]
def _remove_list(list):
result = []
do_append = result.append
for l in list:
if l == '$(':
do_append = lambda x: None
elif l == '$)':
do_append = result.append
else:
do_append(l)
return result
# Indexed by the SUBST_* constants above.
_list_remove = [ _rm_list, None, _remove_list ]

# Regular expressions for splitting strings and handling substitutions,
# for use by the scons_subst() and scons_subst_list() functions:
#
# The first expression compiled matches all of the $-introduced tokens
# that we need to process in some way, and is used for substitutions.
# The expressions it matches are:
#
#       "$$"
#       "$("
#       "$)"
#       "$variable"             [must begin with alphabetic or underscore]
#       "${any stuff}"
#
# The second expression compiled is used for splitting strings into tokens
# to be processed, and it matches all of the tokens listed above, plus
# the following that affect how arguments do or don't get joined together:
#
#       " "                     [white space]
#       "non-white-space"       [without any dollar signs]
#       "$"                     [single dollar sign]
#
_dollar_exps_str = r'\$[\$\(\)]|\$[_a-zA-Z][\.\w]*|\${[^}]*}'
_dollar_exps = re.compile(r'(%s)' % _dollar_exps_str)
_separate_args = re.compile(r'(%s|\s+|[^\s\$]+|\$)' % _dollar_exps_str)

# This regular expression is used to replace strings of multiple white
# space characters in the string result from the scons_subst() function.
_space_sep = re.compile(r'[\t ]+(?![^{]*})')
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Expand a string or list containing construction variable
    substitutions.

    This is the work-horse function for substitutions in file names
    and the like.  The companion scons_subst_list() function (below)
    handles separating command lines into lists of arguments, so see
    that function if that's what you're looking for.

    strSubst -- string (or list, or callable, ...) to expand
    env -- construction environment used for expansion
    mode -- one of the SUBST_* constants at the top of this module
    target, source -- used to populate $TARGET/$SOURCE and friends
        when the caller has not already placed them in lvars
    gvars, lvars -- global and local variable dictionaries for eval()
    conv -- final conversion callable; defaults to _strconv[mode]

    NOTE(review): gvars/lvars are mutable default arguments, and gvars
    is temporarily mutated below ('__builtins__' added then deleted);
    an exception in between could leave the shared default polluted.
    This matches upstream SCons.
    """
    if isinstance(strSubst, str) and strSubst.find('$') < 0:
        # Fast path: a plain string with no '$' needs no expansion.
        return strSubst

    class StringSubber(object):
        """A class to construct the results of a scons_subst() call.

        This binds a specific construction environment, mode, target and
        source with two methods (substitute() and expand()) that handle
        the expansion.
        """
        def __init__(self, env, mode, conv, gvars):
            self.env = env
            self.mode = mode
            self.conv = conv
            self.gvars = gvars

        def expand(self, s, lvars):
            """Expand a single "token" as necessary, returning an
            appropriate string containing the expansion.

            This handles expanding different types of things (strings,
            lists, callables) appropriately.  It calls the wrapper
            substitute() method to re-expand things as necessary, so that
            the results of expansions of side-by-side strings still get
            re-evaluated separately, not smushed together.
            """
            if is_String(s):
                try:
                    s0, s1 = s[:2]
                except (IndexError, ValueError):
                    # Shorter than two characters: cannot be a '$' token.
                    return s
                if s0 != '$':
                    return s
                if s1 == '$':
                    # '$$' is an escaped literal dollar sign.
                    return '$'
                elif s1 in '()':
                    # Leave '$(' / '$)' markers for later stripping.
                    return s
                else:
                    key = s[1:]
                    if key[0] == '{' or key.find('.') >= 0:
                        # '${...}' or dotted reference: evaluate as a
                        # Python expression against gvars/lvars.
                        if key[0] == '{':
                            key = key[1:-1]
                        try:
                            s = eval(key, self.gvars, lvars)
                        except KeyboardInterrupt:
                            raise
                        except Exception, e:
                            if e.__class__ in AllowableExceptions:
                                return ''
                            raise_exception(e, lvars['TARGETS'], s)
                    else:
                        # Plain '$VAR': local variables shadow globals.
                        if key in lvars:
                            s = lvars[key]
                        elif key in self.gvars:
                            s = self.gvars[key]
                        elif not NameError in AllowableExceptions:
                            raise_exception(NameError(key), lvars['TARGETS'], s)
                        else:
                            return ''

                    # Before re-expanding the result, handle
                    # recursive expansion by copying the local
                    # variable dictionary and overwriting a null
                    # string for the value of the variable name
                    # we just expanded.
                    #
                    # This could potentially be optimized by only
                    # copying lvars when s contains more expansions,
                    # but lvars is usually supposed to be pretty
                    # small, and deeply nested variable expansions
                    # are probably more the exception than the norm,
                    # so it should be tolerable for now.
                    lv = lvars.copy()
                    var = key.split('.')[0]
                    lv[var] = ''
                    return self.substitute(s, lv)
            elif is_Sequence(s):
                def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
                    return conv(substitute(l, lvars))
                return list(map(func, s))
            elif callable(s):
                try:
                    s = s(target=lvars['TARGETS'],
                          source=lvars['SOURCES'],
                          env=self.env,
                          for_signature=(self.mode != SUBST_CMD))
                except TypeError:
                    # This probably indicates that it's a callable
                    # object that doesn't match our calling arguments
                    # (like an Action).
                    if self.mode == SUBST_RAW:
                        return s
                    s = self.conv(s)
                return self.substitute(s, lvars)
            elif s is None:
                return ''
            else:
                return s

        def substitute(self, args, lvars):
            """Substitute expansions in an argument or list of arguments.

            This serves as a wrapper for splitting up a string into
            separate tokens.
            """
            if is_String(args) and not isinstance(args, CmdStringHolder):
                args = str(args)        # In case it's a UserString.
                try:
                    def sub_match(match):
                        return self.conv(self.expand(match.group(1), lvars))
                    result = _dollar_exps.sub(sub_match, args)
                except TypeError:
                    # If the internal conversion routine doesn't return
                    # strings (it could be overridden to return Nodes, for
                    # example), then the 1.5.2 re module will throw this
                    # exception.  Back off to a slower, general-purpose
                    # algorithm that works for all data types.
                    args = _separate_args.findall(args)
                    result = []
                    for a in args:
                        result.append(self.conv(self.expand(a, lvars)))
                    if len(result) == 1:
                        result = result[0]
                    else:
                        result = ''.join(map(str, result))
                return result
            else:
                return self.expand(args, lvars)

    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ss = StringSubber(env, mode, conv, gvars)
    result = ss.substitute(strSubst, lvars)

    try:
        del gvars['__builtins__']
    except KeyError:
        pass

    if is_String(result):
        # Remove $(-$) pairs and any stuff in between,
        # if that's appropriate.
        remove = _regex_remove[mode]
        if remove:
            result = remove.sub('', result)
        if mode != SUBST_RAW:
            # Compress strings of white space characters into
            # a single space.
            result = _space_sep.sub(' ', result).strip()
    elif is_Sequence(result):
        remove = _list_remove[mode]
        if remove:
            result = remove(result)

    return result
#Subst_List_Strings = {}
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Substitute construction variables in a string (or list or other
    object) and separate the arguments into a command list.

    The companion scons_subst() function (above) handles basic
    substitutions within strings, so see that function instead
    if that's what you're looking for.

    Returns ls.data: a list of command lines, each of which is a list
    of argument words.  Parameters are as for scons_subst(), and the
    same NOTE about the mutable gvars/lvars defaults applies.
    """
#    try:
#        Subst_List_Strings[strSubst] = Subst_List_Strings[strSubst] + 1
#    except KeyError:
#        Subst_List_Strings[strSubst] = 1
#    import SCons.Debug
#    SCons.Debug.caller_trace(1)
    class ListSubber(collections.UserList):
        """A class to construct the results of a scons_subst_list() call.

        Like StringSubber, this class binds a specific construction
        environment, mode, target and source with two methods
        (substitute() and expand()) that handle the expansion.

        In addition, however, this class is used to track the state of
        the result(s) we're gathering so we can do the appropriate thing
        whenever we have to append another word to the result--start a new
        line, start a new word, append to the current word, etc.  We do
        this by setting the "append" attribute to the right method so
        that our wrapper methods only need ever call ListSubber.append(),
        and the rest of the object takes care of doing the right thing
        internally.
        """
        def __init__(self, env, mode, conv, gvars):
            collections.UserList.__init__(self, [])
            self.env = env
            self.mode = mode
            self.conv = conv
            self.gvars = gvars

            if self.mode == SUBST_RAW:
                # Raw mode keeps the '$(' / '$)' markers in the output;
                # other modes drop them.
                self.add_strip = lambda x: self.append(x)
            else:
                self.add_strip = lambda x: None
            self.in_strip = None
            self.next_line()

        def expand(self, s, lvars, within_list):
            """Expand a single "token" as necessary, appending the
            expansion to the current result.

            This handles expanding different types of things (strings,
            lists, callables) appropriately.  It calls the wrapper
            substitute() method to re-expand things as necessary, so that
            the results of expansions of side-by-side strings still get
            re-evaluated separately, not smushed together.
            """
            if is_String(s):
                try:
                    s0, s1 = s[:2]
                except (IndexError, ValueError):
                    # Shorter than two characters: cannot be a '$' token.
                    self.append(s)
                    return
                if s0 != '$':
                    self.append(s)
                    return
                if s1 == '$':
                    # '$$' is an escaped literal dollar sign.
                    self.append('$')
                elif s1 == '(':
                    self.open_strip('$(')
                elif s1 == ')':
                    self.close_strip('$)')
                else:
                    key = s[1:]
                    if key[0] == '{' or key.find('.') >= 0:
                        # '${...}' or dotted reference: evaluate as a
                        # Python expression against gvars/lvars.
                        if key[0] == '{':
                            key = key[1:-1]
                        try:
                            s = eval(key, self.gvars, lvars)
                        except KeyboardInterrupt:
                            raise
                        except Exception, e:
                            if e.__class__ in AllowableExceptions:
                                return
                            raise_exception(e, lvars['TARGETS'], s)
                    else:
                        # Plain '$VAR': local variables shadow globals.
                        if key in lvars:
                            s = lvars[key]
                        elif key in self.gvars:
                            s = self.gvars[key]
                        elif not NameError in AllowableExceptions:
                            # NOTE(review): unlike scons_subst() above, the
                            # missing key is not passed to NameError here;
                            # this inconsistency matches upstream SCons.
                            raise_exception(NameError(), lvars['TARGETS'], s)
                        else:
                            return

                    # Before re-expanding the result, handle
                    # recursive expansion by copying the local
                    # variable dictionary and overwriting a null
                    # string for the value of the variable name
                    # we just expanded.
                    lv = lvars.copy()
                    var = key.split('.')[0]
                    lv[var] = ''
                    self.substitute(s, lv, 0)
                    self.this_word()
            elif is_Sequence(s):
                for a in s:
                    self.substitute(a, lvars, 1)
                    self.next_word()
            elif callable(s):
                try:
                    s = s(target=lvars['TARGETS'],
                          source=lvars['SOURCES'],
                          env=self.env,
                          for_signature=(self.mode != SUBST_CMD))
                except TypeError:
                    # This probably indicates that it's a callable
                    # object that doesn't match our calling arguments
                    # (like an Action).
                    if self.mode == SUBST_RAW:
                        self.append(s)
                        return
                    s = self.conv(s)
                self.substitute(s, lvars, within_list)
            elif s is None:
                self.this_word()
            else:
                self.append(s)

        def substitute(self, args, lvars, within_list):
            """Substitute expansions in an argument or list of arguments.

            This serves as a wrapper for splitting up a string into
            separate tokens.
            """
            if is_String(args) and not isinstance(args, CmdStringHolder):
                args = str(args)        # In case it's a UserString.
                args = _separate_args.findall(args)
                for a in args:
                    if a[0] in ' \t\n\r\f\v':
                        # White space separates words; a newline starts
                        # a new command line.
                        if '\n' in a:
                            self.next_line()
                        elif within_list:
                            self.append(a)
                        else:
                            self.next_word()
                    else:
                        self.expand(a, lvars, within_list)
            else:
                self.expand(args, lvars, within_list)

        def next_line(self):
            """Arrange for the next word to start a new line.  This
            is like starting a new word, except that we have to append
            another line to the result."""
            collections.UserList.append(self, [])
            self.next_word()

        def this_word(self):
            """Arrange for the next word to append to the end of the
            current last word in the result."""
            self.append = self.add_to_current_word

        def next_word(self):
            """Arrange for the next word to start a new word."""
            self.append = self.add_new_word

        def add_to_current_word(self, x):
            """Append the string x to the end of the current last word
            in the result.  If that is not possible, then just add
            it as a new word.  Make sure the entire concatenated string
            inherits the object attributes of x (in particular, the
            escape function) by wrapping it as CmdStringHolder."""
            if not self.in_strip or self.mode != SUBST_SIG:
                try:
                    current_word = self[-1][-1]
                except IndexError:
                    self.add_new_word(x)
                else:
                    # All right, this is a hack and it should probably
                    # be refactored out of existence in the future.
                    # The issue is that we want to smoosh words together
                    # and make one file name that gets escaped if
                    # we're expanding something like foo$EXTENSION,
                    # but we don't want to smoosh them together if
                    # it's something like >$TARGET, because then we'll
                    # treat the '>' like it's part of the file name.
                    # So for now, just hard-code looking for the special
                    # command-line redirection characters...
                    try:
                        last_char = str(current_word)[-1]
                    except IndexError:
                        last_char = '\0'
                    if last_char in '<>|':
                        self.add_new_word(x)
                    else:
                        y = current_word + x

                        # We used to treat a word appended to a literal
                        # as a literal itself, but this caused problems
                        # with interpreting quotes around space-separated
                        # targets on command lines.  Removing this makes
                        # none of the "substantive" end-to-end tests fail,
                        # so we'll take this out but leave it commented
                        # for now in case there's a problem not covered
                        # by the test cases and we need to resurrect this.
                        #literal1 = self.literal(self[-1][-1])
                        #literal2 = self.literal(x)
                        y = self.conv(y)
                        if is_String(y):
                            #y = CmdStringHolder(y, literal1 or literal2)
                            y = CmdStringHolder(y, None)
                        self[-1][-1] = y

        def add_new_word(self, x):
            # Words inside a $( $) strip region are dropped in
            # signature mode.
            if not self.in_strip or self.mode != SUBST_SIG:
                literal = self.literal(x)
                x = self.conv(x)
                if is_String(x):
                    x = CmdStringHolder(x, literal)
                self[-1].append(x)
            self.append = self.add_to_current_word

        def literal(self, x):
            """Return x's literal flag (via its is_literal() method),
            or None if x has no such method."""
            try:
                l = x.is_literal
            except AttributeError:
                return None
            else:
                return l()

        def open_strip(self, x):
            """Handle the "open strip" $( token."""
            self.add_strip(x)
            self.in_strip = 1

        def close_strip(self, x):
            """Handle the "close strip" $) token."""
            self.add_strip(x)
            self.in_strip = None

    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ls = ListSubber(env, mode, conv, gvars)
    ls.substitute(strSubst, lvars, 0)

    try:
        del gvars['__builtins__']
    except KeyError:
        pass

    return ls.data
def scons_subst_once(strSubst, env, key):
    """Perform single (non-recursive) substitution of a single
    construction variable keyword.

    This is used when setting a variable when copying or overriding values
    in an Environment.  We want to capture (expand) the old value before
    we override it, so people can do things like:

        env2 = env.Clone(CCFLAGS = '$CCFLAGS -g')

    We do this with some straightforward, brute-force code here...
    """
    if isinstance(strSubst, str) and strSubst.find('$') < 0:
        # Fast path: nothing to expand.
        return strSubst

    # Both spellings of a reference to this variable.
    matchlist = ['$' + key, '${' + key + '}']
    val = env.get(key, '')

    def replace(match):
        # Expand one $-token; only references to 'key' are rewritten.
        token = match.group(1)
        if token in matchlist:
            token = val
        if is_Sequence(token):
            return ' '.join(map(str, token))
        return str(token)

    if is_Sequence(strSubst):
        expanded = []
        for element in strSubst:
            if not is_String(element):
                expanded.append(element)
            elif element in matchlist:
                # A whole-element reference splices list values in place.
                if is_Sequence(val):
                    expanded.extend(val)
                else:
                    expanded.append(val)
            else:
                expanded.append(_dollar_exps.sub(replace, element))
        return expanded
    elif is_String(strSubst):
        return _dollar_exps.sub(replace, strSubst)
    else:
        return strSubst
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""scons.Node.FS
File system nodes.
These Nodes represent the canonical external objects that people think
of when they think of building software: files and directories.
This holds a "default_fs" variable that should be initialized with an FS
that can be used by scripts or modules looking for the canonical default.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Node/FS.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import fnmatch
import os
import re
import shutil
import stat
import sys
import time
import codecs
import SCons.Action
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Subst
import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
# Module-level switches consumed elsewhere in this module.
# NOTE(review): the consumers are outside this chunk -- do_store_info
# presumably gates persisting node build info, and print_duplicate
# looks like a debug flag; confirm against the rest of Node/FS.py.
do_store_info = True
print_duplicate = 0
class EntryProxyAttributeError(AttributeError):
"""
An AttributeError subclass for recording and displaying the name
of the underlying Entry involved in an AttributeError exception.
"""
def __init__(self, entry_proxy, attribute):
AttributeError.__init__(self)
self.entry_proxy = entry_proxy
self.attribute = attribute
def __str__(self):
entry = self.entry_proxy.get()
fmt = "%s instance %s has no attribute %s"
return fmt % (entry.__class__.__name__,
repr(entry.name),
repr(self.attribute))
# The max_drift value:  by default, use a cached signature value for
# any file that's been untouched for more than two days.
default_max_drift = 2*24*60*60

#
# We stringify these file system Nodes a lot.  Turning a file system Node
# into a string is non-trivial, because the final string representation
# can depend on a lot of factors:  whether it's a derived target or not,
# whether it's linked to a repository or source directory, and whether
# there's duplication going on.  The normal technique for optimizing
# calculations like this is to memoize (cache) the string value, so you
# only have to do the calculation once.
#
# A number of the above factors, however, can be set after we've already
# been asked to return a string for a Node, because a Repository() or
# VariantDir() call or the like may not occur until later in SConscript
# files.  So this variable controls whether we bother trying to save
# string values for Nodes.  The wrapper interface can set this whenever
# they're done mucking with Repository and VariantDir and the other stuff,
# to let this module know it can start returning saved string values
# for Nodes.
#
Save_Strings = None

def save_strings(val):
    """Set the module-level Save_Strings flag described in the
    comment above (truthy enables memoized Node strings)."""
    global Save_Strings
    Save_Strings = val
#
# Avoid unnecessary function calls by recording a Boolean value that
# tells us whether or not os.path.splitdrive() actually does anything
# on this system, and therefore whether we need to bother calling it
# when looking up path names in various methods below.
#
do_splitdrive = None
_my_splitdrive = None

def initialize_do_splitdrive():
    """Initialize the module-level drive/UNC path-handling globals
    (do_splitdrive, has_unc, _my_splitdrive, OS_SEP, UNC_PREFIX,
    os_sep_is_slash) for the current platform.  Called once at import
    time, just below."""
    global do_splitdrive
    global has_unc
    drive, path = os.path.splitdrive('X:/foo')
    # os.path.splitunc only exists on platforms with UNC path support.
    has_unc = hasattr(os.path, 'splitunc')

    do_splitdrive = not not drive or has_unc

    global _my_splitdrive
    if has_unc:
        def splitdrive(p):
            if p[1:2] == ':':
                return p[:2], p[2:]
            if p[0:2] == '//':
                # Note that we leave a leading slash in the path
                # because UNC paths are always absolute.
                return '//', p[1:]
            return '', p
    else:
        def splitdrive(p):
            if p[1:2] == ':':
                return p[:2], p[2:]
            return '', p
    _my_splitdrive = splitdrive

    # Keep some commonly used values in global variables to skip to
    # module look-up costs.
    global OS_SEP
    global UNC_PREFIX
    global os_sep_is_slash

    OS_SEP = os.sep
    UNC_PREFIX = OS_SEP + OS_SEP
    os_sep_is_slash = OS_SEP == '/'

initialize_do_splitdrive()
# Used to avoid invoking os.path.normpath if not necessary.
needs_normpath_check = re.compile(
r'''
# We need to renormalize the path if it contains any consecutive
# '/' characters.
.*// |
# We need to renormalize the path if it contains a '..' directory.
# Note that we check for all the following cases:
#
# a) The path is a single '..'
# b) The path starts with '..'. E.g. '../' or '../moredirs'
# but we not match '..abc/'.
# c) The path ends with '..'. E.g. '/..' or 'dirs/..'
# d) The path contains a '..' in the middle.
# E.g. dirs/../moredirs
(.*/)?\.\.(?:/|$) |
# We need to renormalize the path if it contains a '.'
# directory, but NOT if it is a single '.' '/' characters. We
# do not want to match a single '.' because this case is checked
# for explicitely since this is common enough case.
#
# Note that we check for all the following cases:
#
# a) We don't match a single '.'
# b) We match if the path starts with '.'. E.g. './' or
# './moredirs' but we not match '.abc/'.
# c) We match if the path ends with '.'. E.g. '/.' or
# 'dirs/.'
# d) We match if the path contains a '.' in the middle.
# E.g. dirs/./moredirs
\./|.*/\.(?:/|$)
''',
re.VERBOSE
)
needs_normpath_match = needs_normpath_check.match
#
# SCons.Action objects for interacting with the outside world.
#
# The Node.FS methods in this module should use these actions to
# create and/or remove files and directories; they should *not* use
# os.{link,symlink,unlink,mkdir}(), etc., directly.
#
# Using these SCons.Action objects ensures that descriptions of these
# external activities are properly displayed, that the displays are
# suppressed when the -s (silent) option is used, and (most importantly)
# the actions are disabled when the -n option is used, in which case
# there should be *no* changes to the external file system(s)...
#
# Hard-link duplication: only available when the platform's os module
# provides link(); otherwise the 'hard' style is skipped (see
# set_duplicate() below).
if hasattr(os, 'link'):
    def _hardlink_func(fs, src, dst):
        # If the source is a symlink, we can't just hard-link to it
        # because a relative symlink may point somewhere completely
        # different.  We must disambiguate the symlink and then
        # hard-link the final destination file.
        while fs.islink(src):
            link = fs.readlink(src)
            if not os.path.isabs(link):
                # NOTE(review): a relative link target is used as-is
                # rather than joined with os.path.dirname(src).  This
                # matches upstream SCons, but verify it is intentional.
                src = link
            else:
                src = os.path.join(os.path.dirname(src), link)
        fs.link(src, dst)
else:
    _hardlink_func = None
# Symbolic-link duplication: only available when the platform's os
# module provides symlink().
if hasattr(os, 'symlink'):
    def _softlink_func(fs, src, dst):
        fs.symlink(src, dst)
else:
    _softlink_func = None
def _copy_func(fs, src, dest):
shutil.copy2(src, dest)
st = fs.stat(src)
fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
# The valid duplication styles: each is a dash-separated preference
# order of the 'hard', 'soft' and 'copy' link implementations above.
Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
                    'hard-copy', 'soft-copy', 'copy']

Link_Funcs = [] # contains the callables of the specified duplication style
def set_duplicate(duplicate):
    """Fill in the module-level Link_Funcs list from a duplication
    style name.

    'duplicate' must be one of Valid_Duplicates; each dash-separated
    component selects one of the link implementations above, with
    components unavailable on this platform silently discarded.

    Raises SCons.Errors.InternalError for an unrecognized style.
    """
    # Set up the dictionary that maps the argument names to the
    # underlying implementations.  We do this inside this function,
    # not in the top-level module code, so that we can remap os.link
    # and os.symlink for testing purposes.
    link_dict = {
        'hard' : _hardlink_func,
        'soft' : _softlink_func,
        'copy' : _copy_func
    }

    # 'x not in y' is the idiomatic spelling of the old 'not x in y'.
    if duplicate not in Valid_Duplicates:
        raise SCons.Errors.InternalError("The argument of set_duplicate "
                                         "should be in Valid_Duplicates")
    global Link_Funcs
    Link_Funcs = []
    for func in duplicate.split('-'):
        if link_dict[func]:
            Link_Funcs.append(link_dict[func])
def LinkFunc(target, source, env):
    """Action function: duplicate source[0] to target[0] using the
    first Link_Funcs entry that succeeds (creating target's directory
    first if needed).  Returns 0 on success; only the last method's
    failure is allowed to propagate."""
    # Relative paths cause problems with symbolic links, so
    # we use absolute paths, which may be a problem for people
    # who want to move their soft-linked src-trees around. Those
    # people should use the 'hard-copy' mode, softlinks cannot be
    # used for that; at least I have no idea how ...
    src = source[0].abspath
    dest = target[0].abspath
    dir, file = os.path.split(dest)
    if dir and not target[0].fs.isdir(dir):
        os.makedirs(dir)
    if not Link_Funcs:
        # Set a default order of link functions.
        set_duplicate('hard-soft-copy')
    fs = source[0].fs
    # Now link the files with the previously specified order.
    for func in Link_Funcs:
        try:
            func(fs, src, dest)
            break
        except (IOError, OSError):
            # An OSError indicates something happened like a permissions
            # problem or an attempt to symlink across file-system
            # boundaries. An IOError indicates something like the file
            # not existing. In either case, keeping trying additional
            # functions in the list and only raise an error if the last
            # one failed.
            if func == Link_Funcs[-1]:
                # exception of the last link method (copy) are fatal
                raise
    return 0
Link = SCons.Action.Action(LinkFunc, None)
def LocalString(target, source, env):
    """Return the display string for the LocalCopy action below."""
    dst, src = target[0], source[0]
    return 'Local copy of %s from %s' % (dst, src)
LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
def UnlinkFunc(target, source, env):
    """Action function: remove the target[0] file via its Node.FS
    object.  Always returns 0 (success)."""
    t = target[0]
    t.fs.unlink(t.abspath)
    return 0

Unlink = SCons.Action.Action(UnlinkFunc, None)
def MkdirFunc(target, source, env):
    """Action function: create the target[0] directory if it does not
    already exist.  Always returns 0 (success)."""
    t = target[0]
    if not t.exists():
        t.fs.mkdir(t.abspath)
    return 0

Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)

# Lazily-created singleton builder for directory nodes; see
# get_MkdirBuilder() below.
MkdirBuilder = None
def get_MkdirBuilder():
    """Return the singleton Builder for creating directories,
    constructing it on first use (imports are deferred to avoid
    import-time cycles)."""
    global MkdirBuilder
    if MkdirBuilder is None:
        import SCons.Builder
        import SCons.Defaults
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        MkdirBuilder = SCons.Builder.Builder(action = Mkdir,
                                             env = None,
                                             explain = None,
                                             is_explicit = None,
                                             target_scanner = SCons.Defaults.DirEntryScanner,
                                             name = "MkdirBuilder")
    return MkdirBuilder
class _Null(object):
    """Unique sentinel type used (via the _null instance below) to
    distinguish 'no value supplied' from an explicit None."""
    pass

_null = _Null()

# Lazily-created singleton builders for checking sources out of SCCS
# and RCS; see the get_Default*Builder() functions below.
DefaultSCCSBuilder = None
DefaultRCSBuilder = None
def get_DefaultSCCSBuilder():
    """Return the singleton Builder that checks sources out of SCCS
    (via $SCCSCOM), constructing it on first use."""
    global DefaultSCCSBuilder
    if DefaultSCCSBuilder is None:
        import SCons.Builder
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
        DefaultSCCSBuilder = SCons.Builder.Builder(action = act,
                                                   env = None,
                                                   name = "DefaultSCCSBuilder")
    return DefaultSCCSBuilder
def get_DefaultRCSBuilder():
    """Return the singleton Builder that checks sources out of RCS
    (via $RCS_COCOM), constructing it on first use."""
    global DefaultRCSBuilder
    if DefaultRCSBuilder is None:
        import SCons.Builder
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR')
        DefaultRCSBuilder = SCons.Builder.Builder(action = act,
                                                  env = None,
                                                  name = "DefaultRCSBuilder")
    return DefaultRCSBuilder
# Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem.
_is_cygwin = sys.platform == "cygwin"
# NOTE(review): comparing normcase() against normpath() (instead of the
# literal "TeSt") looks odd but matches upstream SCons; it still detects
# whether normcase() changed the string's case on this platform.
if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
    # Case-sensitive filesystem: comparisons can use names unchanged.
    def _my_normcase(x):
        return x
else:
    # Case-insensitive filesystem: normalize to upper case.
    def _my_normcase(x):
        return x.upper()
class DiskChecker(object):
    """A toggleable on-disk consistency check.

    Wraps two implementations of the same check: one that really
    performs it and one that skips it.  Calling the instance dispatches
    to whichever implementation the most recent set() call selected
    (the real one by default).
    """
    def __init__(self, check_type, do_func, ignore_func):
        # Parameter names renamed from the originals ('type', 'do',
        # 'ignore') so the builtin type() is not shadowed; callers in
        # this module pass them positionally.  Attribute names are
        # unchanged (external code reads dc.type).
        self.type = check_type          # name used in diskcheck lists
        self.do = do_func               # the real check
        self.ignore = ignore_func       # the no-op variant
        self.func = do_func             # currently-active implementation

    def __call__(self, *args, **kw):
        return self.func(*args, **kw)

    def set(self, enabled_types):
        """Enable this check if its name appears in enabled_types,
        otherwise disable it.  (Parameter renamed from the
        builtin-shadowing 'list'; callers pass it positionally.)"""
        if self.type in enabled_types:
            self.func = self.do
        else:
            self.func = self.ignore
def do_diskcheck_match(node, predicate, errorfmt):
    """Raise TypeError(errorfmt % node.abspath) if predicate() returns
    a true value; otherwise return None."""
    result = predicate()
    try:
        # If calling the predicate() cached a None value from stat(),
        # remove it so it doesn't interfere with later attempts to
        # build this Node as we walk the DAG.  (This isn't a great way
        # to do this, we're reaching into an interface that doesn't
        # really belong to us, but it's all about performance, so
        # for now we'll just document the dependency...)
        if node._memo['stat'] is None:
            del node._memo['stat']
    except (AttributeError, KeyError):
        # No memo dict or no cached stat entry: nothing to clean up.
        pass
    if result:
        raise TypeError(errorfmt % node.abspath)
def ignore_diskcheck_match(node, predicate, errorfmt):
    """Disabled variant of do_diskcheck_match(): never raises."""
    pass
def do_diskcheck_rcs(node, name):
    """Return whether 'name' has an RCS control file (name,v) in the
    node directory's RCS subdirectory; the RCS-dir lookup is cached on
    the node as node.rcs_dir."""
    try:
        rcs_dir = node.rcs_dir
    except AttributeError:
        # First call for this node: locate and cache the RCS directory.
        if node.entry_exists_on_disk('RCS'):
            rcs_dir = node.Dir('RCS')
        else:
            rcs_dir = None
        node.rcs_dir = rcs_dir
    if rcs_dir:
        return rcs_dir.entry_exists_on_disk(name+',v')
    return None

def ignore_diskcheck_rcs(node, name):
    """Disabled variant of do_diskcheck_rcs(): never reports a match."""
    return None
def do_diskcheck_sccs(node, name):
    """Return whether 'name' has an SCCS control file (s.name) in the
    node directory's SCCS subdirectory; the SCCS-dir lookup is cached
    on the node as node.sccs_dir."""
    try:
        sccs_dir = node.sccs_dir
    except AttributeError:
        # First call for this node: locate and cache the SCCS directory.
        if node.entry_exists_on_disk('SCCS'):
            sccs_dir = node.Dir('SCCS')
        else:
            sccs_dir = None
        node.sccs_dir = sccs_dir
    if sccs_dir:
        return sccs_dir.entry_exists_on_disk('s.'+name)
    return None

def ignore_diskcheck_sccs(node, name):
    """Disabled variant of do_diskcheck_sccs(): never reports a match."""
    return None
# The three switchable disk checks, plus the list used by
# set_diskcheck()/diskcheck_types() below to toggle and enumerate them.
diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match)
diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs)
diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs)

diskcheckers = [
    diskcheck_match,
    diskcheck_rcs,
    diskcheck_sccs,
]
def set_diskcheck(enabled_types):
    """Enable exactly the disk checks whose names appear in
    enabled_types and disable all others.

    The parameter was renamed from 'list', which shadowed the builtin;
    callers pass it positionally.
    """
    for dc in diskcheckers:
        dc.set(enabled_types)
def diskcheck_types():
    """Return the names of all available disk-check types."""
    return [dc.type for dc in diskcheckers]
class EntryProxy(SCons.Util.Proxy):
    """Proxy around a Node.FS entry that adds the "special" string
    attributes (base, posix, windows, abspath, filebase, suffix, file,
    srcpath, srcdir, rsrcpath, rsrcdir, dir) resolved through
    __getattr__() below."""

    __str__ = SCons.Util.Delegate('__str__')

    def __get_abspath(self):
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(),
                                              entry.name + "_abspath")

    def __get_filebase(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0],
                                              name + "_filebase")

    def __get_suffix(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1],
                                              name + "_suffix")

    def __get_file(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(name, name + "_file")

    def __get_base_path(self):
        """Return the file's directory and file name, with the
        suffix stripped."""
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0],
                                              entry.name + "_base")

    def __get_posix_path(self):
        """Return the path with / as the path separator,
        regardless of platform."""
        if os_sep_is_slash:
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(OS_SEP, '/')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")

    def __get_windows_path(self):
        """Return the path with \ as the path separator,
        regardless of platform."""
        if OS_SEP == '\\':
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(OS_SEP, '\\')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")

    def __get_srcnode(self):
        return EntryProxy(self.get().srcnode())

    def __get_srcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().dir)

    def __get_rsrcnode(self):
        return EntryProxy(self.get().srcnode().rfile())

    def __get_rsrcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().rfile().dir)

    def __get_dir(self):
        return EntryProxy(self.get().dir)

    # Maps special attribute names to the bound accessor above.
    # "win32" is kept as a legacy alias for "windows".
    dictSpecialAttrs = { "base"     : __get_base_path,
                         "posix"    : __get_posix_path,
                         "windows"  : __get_windows_path,
                         "win32"    : __get_windows_path,
                         "srcpath"  : __get_srcnode,
                         "srcdir"   : __get_srcdir,
                         "dir"      : __get_dir,
                         "abspath"  : __get_abspath,
                         "filebase" : __get_filebase,
                         "suffix"   : __get_suffix,
                         "file"     : __get_file,
                         "rsrcpath" : __get_rsrcnode,
                         "rsrcdir"  : __get_rsrcdir,
                       }

    def __getattr__(self, name):
        # This is how we implement the "special" attributes
        # such as base, posix, srcdir, etc.
        try:
            attr_function = self.dictSpecialAttrs[name]
        except KeyError:
            try:
                attr = SCons.Util.Proxy.__getattr__(self, name)
            except AttributeError, e:
                # NOTE(review): 'e' is bound but unused here.
                # Raise our own AttributeError subclass with an
                # overridden __str__() method that identifies the
                # name of the entry that caused the exception.
                raise EntryProxyAttributeError(self, name)
            return attr
        else:
            return attr_function(self)
class Base(SCons.Node.Node):
    """A generic class for file system entries.  This class is for
    when we don't know yet whether the entry being looked up is a file
    or a directory.  Instances of this class can morph into either
    Dir or File objects by a later, more precise lookup.

    Note: this class does not define __cmp__ and __hash__ for
    efficiency reasons.  SCons does a lot of comparing of
    Node.FS.{Base,Entry,File,Dir} objects, so those operations must be
    as fast as possible, which means we want to use Python's built-in
    object identity comparisons.
    """

    memoizer_counters = []

    def __init__(self, name, directory, fs):
        """Initialize a generic Node.FS.Base object.

        Call the superclass initialization, take care of setting up
        our relative and absolute paths, identify our parent
        directory, and indicate that this node should use
        signatures."""
        if __debug__: logInstanceCreation(self, 'Node.FS.Base')
        SCons.Node.Node.__init__(self)

        # Filenames and paths are probably reused and are intern'ed to
        # save some memory.

        # Filename with extension as it was specified when the object was
        # created; to obtain filesystem path, use Python str() function
        self.name = SCons.Util.silent_intern(name)
        # Cached filename extension
        self.suffix = SCons.Util.silent_intern(SCons.Util.splitext(name)[1])
        self.fs = fs # Reference to parent Node.FS object

        assert directory, "A directory must be provided"

        self.abspath = SCons.Util.silent_intern(directory.entry_abspath(name))
        self.labspath = SCons.Util.silent_intern(directory.entry_labspath(name))
        if directory.path == '.':
            self.path = SCons.Util.silent_intern(name)
        else:
            self.path = SCons.Util.silent_intern(directory.entry_path(name))
        if directory.tpath == '.':
            self.tpath = SCons.Util.silent_intern(name)
        else:
            self.tpath = SCons.Util.silent_intern(directory.entry_tpath(name))
        self.path_elements = directory.path_elements + [self]

        self.dir = directory
        self.cwd = None # will hold the SConscript directory for target nodes
        self.duplicate = directory.duplicate

    def str_for_display(self):
        # Quoted form of the path for error/status messages.
        return '"' + self.__str__() + '"'

    def must_be_same(self, klass):
        """
        This node, which already existed, is being looked up as the
        specified klass.  Raise an exception if it isn't.
        """
        if isinstance(self, klass) or klass is Entry:
            return
        raise TypeError("Tried to lookup %s '%s' as a %s." %\
              (self.__class__.__name__, self.path, klass.__name__))

    def get_dir(self):
        return self.dir

    def get_suffix(self):
        return self.suffix

    def rfile(self):
        # Nodes that can live in a Repository (File) override this.
        return self

    def __str__(self):
        """A Node.FS.Base object's string representation is its path
        name."""
        global Save_Strings
        if Save_Strings:
            return self._save_str()
        return self._get_str()

    memoizer_counters.append(SCons.Memoize.CountValue('_save_str'))

    def _save_str(self):
        # Memoized (and interned) form of _get_str().
        try:
            return self._memo['_save_str']
        except KeyError:
            pass
        result = sys.intern(self._get_str())
        self._memo['_save_str'] = result
        return result

    def _get_str(self):
        global Save_Strings
        if self.duplicate or self.is_derived():
            return self.get_path()
        srcnode = self.srcnode()
        # If there's no source file on disk but this node exists,
        # print our own path; otherwise prefer the source path.
        if srcnode.stat() is None and self.stat() is not None:
            result = self.get_path()
        else:
            result = srcnode.get_path()
        if not Save_Strings:
            # We're not at the point where we're saving the string
            # representations of FS Nodes (because we haven't finished
            # reading the SConscript files and need to have str() return
            # things relative to them).  That also means we can't yet
            # cache values returned (or not returned) by stat(), since
            # Python code in the SConscript files might still create
            # or otherwise affect the on-disk file.  So get rid of the
            # values that the underlying stat() method saved.
            try: del self._memo['stat']
            except KeyError: pass
            if self is not srcnode:
                try: del srcnode._memo['stat']
                except KeyError: pass
        return result

    rstr = __str__

    memoizer_counters.append(SCons.Memoize.CountValue('stat'))

    def stat(self):
        # Cached os.stat() result for this node's absolute path;
        # None if the path doesn't exist (or stat fails).
        try: return self._memo['stat']
        except KeyError: pass
        try: result = self.fs.stat(self.abspath)
        except os.error: result = None
        self._memo['stat'] = result
        return result

    def exists(self):
        return self.stat() is not None

    def rexists(self):
        # Existence check that also considers Repository copies.
        return self.rfile().exists()

    def getmtime(self):
        st = self.stat()
        if st: return st[stat.ST_MTIME]
        else: return None

    def getsize(self):
        st = self.stat()
        if st: return st[stat.ST_SIZE]
        else: return None

    def isdir(self):
        st = self.stat()
        return st is not None and stat.S_ISDIR(st[stat.ST_MODE])

    def isfile(self):
        st = self.stat()
        return st is not None and stat.S_ISREG(st[stat.ST_MODE])

    if hasattr(os, 'symlink'):
        def islink(self):
            # Use lstat() so the link itself is examined, not its target.
            try: st = self.fs.lstat(self.abspath)
            except os.error: return 0
            return stat.S_ISLNK(st[stat.ST_MODE])
    else:
        def islink(self):
            return 0 # no symlinks

    def is_under(self, dir):
        # True if dir is this node or one of its ancestors.
        if self is dir:
            return 1
        else:
            return self.dir.is_under(dir)

    def set_local(self):
        self._local = 1

    def srcnode(self):
        """If this node is in a build path, return the node
        corresponding to its source file.  Otherwise, return
        ourself.
        """
        srcdir_list = self.dir.srcdir_list()
        if srcdir_list:
            srcnode = srcdir_list[0].Entry(self.name)
            srcnode.must_be_same(self.__class__)
            return srcnode
        return self

    def get_path(self, dir=None):
        """Return path relative to the current working directory of the
        Node.FS.Base object that owns us."""
        if not dir:
            dir = self.fs.getcwd()
        if self == dir:
            return '.'
        path_elems = self.path_elements
        pathname = ''
        try: i = path_elems.index(dir)
        except ValueError:
            # dir is not an ancestor of ours: emit the full path.
            for p in path_elems[:-1]:
                pathname += p.dirname
        else:
            # Emit only the components below dir.
            for p in path_elems[i+1:-1]:
                pathname += p.dirname
        return pathname + path_elems[-1].name

    def set_src_builder(self, builder):
        """Set the source code builder for this node."""
        self.sbuilder = builder
        if not self.has_builder():
            self.builder_set(builder)

    def src_builder(self):
        """Fetch the source code builder for this node.

        If there isn't one, we cache the source code builder specified
        for the directory (which in turn will cache the value from its
        parent directory, and so on up to the file system root).
        """
        try:
            scb = self.sbuilder
        except AttributeError:
            scb = self.dir.src_builder()
            self.sbuilder = scb
        return scb

    def get_abspath(self):
        """Get the absolute path of the file."""
        return self.abspath

    def for_signature(self):
        # Return just our name.  Even an absolute path would not work,
        # because that can change thanks to symlinks or remapped network
        # paths.
        return self.name

    def get_subst_proxy(self):
        # Lazily create (and cache) the EntryProxy used in substitutions.
        try:
            return self._proxy
        except AttributeError:
            ret = EntryProxy(self)
            self._proxy = ret
            return ret

    def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
        """Generates a target entry that corresponds to this entry (usually
        a source file) with the specified prefix and suffix.

        Note that this method can be overridden dynamically for generated
        files that need different behavior.  See Tool/swig.py for
        an example.
        """
        return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)

    def _Rfindalldirs_key(self, pathlist):
        # Memoization key for Rfindalldirs().
        return pathlist

    memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key))

    def Rfindalldirs(self, pathlist):
        """
        Return all of the directories for a given path list, including
        corresponding "backing" directories in any repositories.

        The Node lookups are relative to this Node (typically a
        directory), so memoizing result saves cycles from looking
        up the same path for each target in a given directory.
        """
        try:
            memo_dict = self._memo['Rfindalldirs']
        except KeyError:
            memo_dict = {}
            self._memo['Rfindalldirs'] = memo_dict
        else:
            try:
                return memo_dict[pathlist]
            except KeyError:
                pass

        create_dir_relative_to_self = self.Dir
        result = []
        for path in pathlist:
            if isinstance(path, SCons.Node.Node):
                result.append(path)
            else:
                dir = create_dir_relative_to_self(path)
                result.extend(dir.get_all_rdirs())

        memo_dict[pathlist] = result

        return result

    def RDirs(self, pathlist):
        """Search for a list of directories in the Repository list."""
        cwd = self.cwd or self.fs._cwd
        return cwd.Rfindalldirs(pathlist)

    memoizer_counters.append(SCons.Memoize.CountValue('rentry'))

    def rentry(self):
        # Return the first Repository entry backing this node if it
        # doesn't exist locally; otherwise return self.  Memoized.
        try:
            return self._memo['rentry']
        except KeyError:
            pass
        result = self
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try:
                    node = dir.entries[norm_name]
                except KeyError:
                    if dir.entry_exists_on_disk(self.name):
                        result = dir.Entry(self.name)
                        break
        self._memo['rentry'] = result
        return result

    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        # Non-directory nodes have no children to glob over.
        return []
class Entry(Base):
    """This is the class for generic Node.FS entries--that is, things
    that could be a File or a Dir, but we're just not sure yet.
    Consequently, the methods in this class really exist just to
    transform their associated object into the right class when the
    time comes, and then call the same-named method in the transformed
    class."""

    def diskcheck_match(self):
        # A generic Entry has no on-disk type expectation to check.
        pass

    def disambiguate(self, must_exist=None):
        """Morph this Entry into a Dir or a File, based on what exists
        on disk (or in the linked source directory), and return self.
        If must_exist is true and nothing can be found, raise a
        SCons.Errors.UserError.
        """
        if self.isdir():
            self.__class__ = Dir
            self._morph()
        elif self.isfile():
            self.__class__ = File
            self._morph()
            self.clear()
        else:
            # There was nothing on-disk at this location, so look in
            # the src directory.
            #
            # We can't just use self.srcnode() straight away because
            # that would create an actual Node for this file in the src
            # directory, and there might not be one.  Instead, use the
            # dir_on_disk() method to see if there's something on-disk
            # with that name, in which case we can go ahead and call
            # self.srcnode() to create the right type of entry.
            srcdir = self.dir.srcnode()
            if srcdir != self.dir and \
               srcdir.entry_exists_on_disk(self.name) and \
               self.srcnode().isdir():
                self.__class__ = Dir
                self._morph()
            elif must_exist:
                msg = "No such file or directory: '%s'" % self.abspath
                raise SCons.Errors.UserError(msg)
            else:
                # Default to a File when nothing exists anywhere.
                self.__class__ = File
                self._morph()
                self.clear()
        return self

    def rfile(self):
        """We're a generic Entry, but the caller is actually looking for
        a File at this point, so morph into one."""
        self.__class__ = File
        self._morph()
        self.clear()
        return File.rfile(self)

    def scanner_key(self):
        return self.get_suffix()

    def get_contents(self):
        """Fetch the contents of the entry.  Returns the exact binary
        contents of the file."""
        try:
            self = self.disambiguate(must_exist=1)
        except SCons.Errors.UserError:
            # There was nothing on disk with which to disambiguate
            # this entry.  Leave it as an Entry, but return a null
            # string so calls to get_contents() in emitters and the
            # like (e.g. in qt.py) don't have to disambiguate by hand
            # or catch the exception.
            return ''
        else:
            # self has morphed, so this dispatches to File or Dir.
            return self.get_contents()

    def get_text_contents(self):
        """Fetch the decoded text contents of a Unicode encoded Entry.

        Since this should return the text contents from the file
        system, we check to see into what sort of subclass we should
        morph this Entry."""
        try:
            self = self.disambiguate(must_exist=1)
        except SCons.Errors.UserError:
            # There was nothing on disk with which to disambiguate
            # this entry.  Leave it as an Entry, but return a null
            # string so calls to get_text_contents() in emitters and
            # the like (e.g. in qt.py) don't have to disambiguate by
            # hand or catch the exception.
            return ''
        else:
            return self.get_text_contents()

    def must_be_same(self, klass):
        """Called to make sure a Node is a Dir.  Since we're an
        Entry, we can morph into one."""
        if self.__class__ is not klass:
            self.__class__ = klass
            self._morph()
            self.clear()

    # The following methods can get called before the Taskmaster has
    # had a chance to call disambiguate() directly to see if this Entry
    # should really be a Dir or a File.  We therefore use these to call
    # disambiguate() transparently (from our caller's point of view).
    #
    # Right now, this minimal set of methods has been derived by just
    # looking at some of the methods that will obviously be called early
    # in any of the various Taskmasters' calling sequences, and then
    # empirically figuring out which additional methods are necessary
    # to make various tests pass.

    def exists(self):
        """Return if the Entry exists.  Check the file system to see
        what we should turn into first.  Assume a file if there's no
        directory."""
        return self.disambiguate().exists()

    def rel_path(self, other):
        d = self.disambiguate()
        if d.__class__ is Entry:
            raise Exception("rel_path() could not disambiguate File/Dir")
        return d.rel_path(other)

    def new_ninfo(self):
        return self.disambiguate().new_ninfo()

    def changed_since_last_build(self, target, prev_ni):
        return self.disambiguate().changed_since_last_build(target, prev_ni)

    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        return self.disambiguate()._glob1(pattern, ondisk, source, strings)

    def get_subst_proxy(self):
        return self.disambiguate().get_subst_proxy()
# This is for later so we can differentiate between Entry the class and Entry
# the method of the FS class.
_classEntry = Entry
class LocalFS(object):

    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    # This class implements an abstraction layer for operations involving
    # a local file system.  Essentially, this wraps any function in
    # the os, os.path or shutil modules that we use to actually go do
    # anything with or to the local file system.
    #
    # Note that there's a very good chance we'll refactor this part of
    # the architecture in some way as we really implement the interface(s)
    # for remote file system Nodes.  For example, the right architecture
    # might be to have this be a subclass instead of a base class.
    # Nevertheless, we're using this as a first step in that direction.
    #
    # We're not using chdir() yet because the calling subclass method
    # needs to use os.chdir() directly to avoid recursion.  Will we
    # really need this one?
    #def chdir(self, path):
    #    return os.chdir(path)
    def chmod(self, path, mode):
        return os.chmod(path, mode)
    def copy(self, src, dst):
        return shutil.copy(src, dst)
    def copy2(self, src, dst):
        # copy2() also preserves file metadata (mtime etc.).
        return shutil.copy2(src, dst)
    def exists(self, path):
        return os.path.exists(path)
    def getmtime(self, path):
        return os.path.getmtime(path)
    def getsize(self, path):
        return os.path.getsize(path)
    def isdir(self, path):
        return os.path.isdir(path)
    def isfile(self, path):
        return os.path.isfile(path)
    def link(self, src, dst):
        return os.link(src, dst)
    def lstat(self, path):
        return os.lstat(path)
    def listdir(self, path):
        return os.listdir(path)
    def makedirs(self, path):
        return os.makedirs(path)
    def mkdir(self, path):
        return os.mkdir(path)
    def rename(self, old, new):
        return os.rename(old, new)
    def stat(self, path):
        return os.stat(path)
    def symlink(self, src, dst):
        return os.symlink(src, dst)
    def open(self, path):
        return open(path)
    def unlink(self, path):
        return os.unlink(path)

    # Define islink()/readlink() according to platform capabilities.
    if hasattr(os, 'symlink'):
        def islink(self, path):
            return os.path.islink(path)
    else:
        def islink(self, path):
            return 0 # no symlinks

    if hasattr(os, 'readlink'):
        def readlink(self, file):
            return os.readlink(file)
    else:
        def readlink(self, file):
            return ''
#class RemoteFS:
# # Skeleton for the obvious methods we might need from the
# # abstraction layer for a remote filesystem.
# def upload(self, local_src, remote_dst):
# pass
# def download(self, remote_src, local_dst):
# pass
class FS(LocalFS):

    memoizer_counters = []

    def __init__(self, path = None):
        """Initialize the Node.FS subsystem.

        The supplied path is the top of the source tree, where we
        expect to find the top-level build file.  If no path is
        supplied, the current directory is the default.

        The path argument must be a valid absolute path.
        """
        if __debug__: logInstanceCreation(self, 'Node.FS')

        self._memo = {}

        self.Root = {}
        self.SConstruct_dir = None
        self.max_drift = default_max_drift

        self.Top = None
        if path is None:
            self.pathTop = os.getcwd()
        else:
            self.pathTop = path
        self.defaultDrive = _my_normcase(_my_splitdrive(self.pathTop)[0])

        self.Top = self.Dir(self.pathTop)
        self.Top.path = '.'
        self.Top.tpath = '.'
        self._cwd = self.Top

        # Give the NodeInfo classes a reference back to this FS.
        DirNodeInfo.fs = self
        FileNodeInfo.fs = self

    def set_SConstruct_dir(self, dir):
        self.SConstruct_dir = dir

    def get_max_drift(self):
        return self.max_drift

    def set_max_drift(self, max_drift):
        self.max_drift = max_drift

    def getcwd(self):
        if hasattr(self, "_cwd"):
            return self._cwd
        else:
            return "<no cwd>"

    def chdir(self, dir, change_os_dir=0):
        """Change the current working directory for lookups.
        If change_os_dir is true, we will also change the "real" cwd
        to match.
        """
        curr=self._cwd
        try:
            if dir is not None:
                self._cwd = dir
                if change_os_dir:
                    os.chdir(dir.abspath)
        except OSError:
            # Roll back the lookup cwd if the OS-level chdir failed.
            self._cwd = curr
            raise

    def get_root(self, drive):
        """
        Returns the root directory for the specified drive, creating
        it if necessary.
        """
        drive = _my_normcase(drive)
        try:
            return self.Root[drive]
        except KeyError:
            root = RootDir(drive, self)
            self.Root[drive] = root
            # Alias the empty drive and the default drive to the same
            # RootDir so either spelling finds the same node.
            if not drive:
                self.Root[self.defaultDrive] = root
            elif drive == self.defaultDrive:
                self.Root[''] = root
            return root

    def _lookup(self, p, directory, fsclass, create=1):
        """
        The generic entry point for Node lookup with user-supplied data.

        This translates arbitrary input into a canonical Node.FS object
        of the specified fsclass.  The general approach for strings is
        to turn it into a fully normalized absolute path and then call
        the root directory's lookup_abs() method for the heavy lifting.

        If the path name begins with '#', it is unconditionally
        interpreted relative to the top-level directory of this FS.  '#'
        is treated as a synonym for the top-level SConstruct directory,
        much like '~' is treated as a synonym for the user's home
        directory in a UNIX shell.  So both '#foo' and '#/foo' refer
        to the 'foo' subdirectory underneath the top-level SConstruct
        directory.

        If the path name is relative, then the path is looked up relative
        to the specified directory, or the current directory (self._cwd,
        typically the SConscript directory) if the specified directory
        is None.
        """
        if isinstance(p, Base):
            # It's already a Node.FS object.  Make sure it's the right
            # class and return.
            p.must_be_same(fsclass)
            return p
        # str(p) in case it's something like a proxy object
        p = str(p)

        if not os_sep_is_slash:
            p = p.replace(OS_SEP, '/')

        if p[0:1] == '#':
            # There was an initial '#', so we strip it and override
            # whatever directory they may have specified with the
            # top-level SConstruct directory.
            p = p[1:]
            directory = self.Top

            # There might be a drive letter following the
            # '#'. Although it is not described in the SCons man page,
            # the regression test suite explicitly tests for that
            # syntax. It seems to mean the following thing:
            #
            #   Assuming the SCons top dir is in C:/xxx/yyy,
            #   '#X:/toto' means X:/xxx/yyy/toto.
            #
            # i.e. it assumes that the X: drive has a directory
            # structure similar to the one found on drive C:.
            if do_splitdrive:
                drive, p = _my_splitdrive(p)
                if drive:
                    root = self.get_root(drive)
                else:
                    root = directory.root
            else:
                root = directory.root

            # We can only strip trailing slashes after splitting the
            # drive since the drive might be the UNC '//' prefix.
            p = p.strip('/')

            needs_normpath = needs_normpath_match(p)

            # The path is relative to the top-level SCons directory.
            if p in ('', '.'):
                p = directory.labspath
            else:
                p = directory.labspath + '/' + p
        else:
            if do_splitdrive:
                drive, p = _my_splitdrive(p)
                if drive and not p:
                    # This causes a naked drive letter to be treated
                    # as a synonym for the root directory on that
                    # drive.
                    p = '/'
            else:
                drive = ''

            # We can only strip trailing '/' since the drive might be
            # the UNC '//' prefix.
            if p != '/':
                p = p.rstrip('/')

            needs_normpath = needs_normpath_match(p)

            if p[0:1] == '/':
                # Absolute path
                root = self.get_root(drive)
            else:
                # This is a relative lookup or to the current directory
                # (the path name is not absolute).  Add the string to the
                # appropriate directory lookup path, after which the whole
                # thing gets normalized.
                if directory:
                    if not isinstance(directory, Dir):
                        directory = self.Dir(directory)
                else:
                    directory = self._cwd

                if p in ('', '.'):
                    p = directory.labspath
                else:
                    p = directory.labspath + '/' + p

                if drive:
                    root = self.get_root(drive)
                else:
                    root = directory.root

        if needs_normpath is not None:
            # Normalize a pathname. Will return the same result for
            # equivalent paths.
            #
            # We take advantage of the fact that we have an absolute
            # path here for sure. In addition, we know that the
            # components of lookup path are separated by slashes at
            # this point. Because of this, this code is about 2X
            # faster than calling os.path.normpath() followed by
            # replacing os.sep with '/' again.
            ins = p.split('/')[1:]
            outs = []
            for d in ins:
                if d == '..':
                    try:
                        outs.pop()
                    except IndexError:
                        pass
                elif d not in ('', '.'):
                    outs.append(d)
            p = '/' + '/'.join(outs)

        return root._lookup_abs(p, fsclass, create)

    def Entry(self, name, directory = None, create = 1):
        """Look up or create a generic Entry node with the specified name.
        If the name is a relative path (begins with ./, ../, or a file
        name), then it is looked up relative to the supplied directory
        node, or to the top level directory of the FS (supplied at
        construction time) if no directory is supplied.
        """
        return self._lookup(name, directory, Entry, create)

    def File(self, name, directory = None, create = 1):
        """Look up or create a File node with the specified name.  If
        the name is a relative path (begins with ./, ../, or a file name),
        then it is looked up relative to the supplied directory node,
        or to the top level directory of the FS (supplied at construction
        time) if no directory is supplied.

        This method will raise TypeError if a directory is found at the
        specified path.
        """
        return self._lookup(name, directory, File, create)

    def Dir(self, name, directory = None, create = True):
        """Look up or create a Dir node with the specified name.  If
        the name is a relative path (begins with ./, ../, or a file name),
        then it is looked up relative to the supplied directory node,
        or to the top level directory of the FS (supplied at construction
        time) if no directory is supplied.

        This method will raise TypeError if a normal file is found at the
        specified path.
        """
        return self._lookup(name, directory, Dir, create)

    def VariantDir(self, variant_dir, src_dir, duplicate=1):
        """Link the supplied variant directory to the source directory
        for purposes of building files."""
        if not isinstance(src_dir, SCons.Node.Node):
            src_dir = self.Dir(src_dir)
        if not isinstance(variant_dir, SCons.Node.Node):
            variant_dir = self.Dir(variant_dir)
        if src_dir.is_under(variant_dir):
            raise SCons.Errors.UserError("Source directory cannot be under variant directory.")
        if variant_dir.srcdir:
            if variant_dir.srcdir == src_dir:
                return # We already did this.
            raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir))
        variant_dir.link(src_dir, duplicate)

    def Repository(self, *dirs):
        """Specify Repository directories to search."""
        for d in dirs:
            if not isinstance(d, SCons.Node.Node):
                d = self.Dir(d)
            self.Top.addRepository(d)

    def variant_dir_target_climb(self, orig, dir, tail):
        """Create targets in corresponding variant directories

        Climb the directory tree, and look up path names
        relative to any linked variant directories we find.

        Even though this loops and walks up the tree, we don't memoize
        the return value because this is really only used to process
        the command-line targets.
        """
        targets = []
        message = None
        fmt = "building associated VariantDir targets: %s"
        start_dir = dir
        while dir:
            for bd in dir.variant_dirs:
                if start_dir.is_under(bd):
                    # If already in the build-dir location, don't reflect
                    return [orig], fmt % str(orig)
                p = os.path.join(bd.path, *tail)
                targets.append(self.Entry(p))
            tail = [dir.name] + tail
            dir = dir.up()
        if targets:
            message = fmt % ' '.join(map(str, targets))
        return targets, message

    def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
        """
        Globs

        This is mainly a shim layer
        """
        if cwd is None:
            cwd = self.getcwd()
        return cwd.glob(pathname, ondisk, source, strings)
class DirNodeInfo(SCons.Node.NodeInfoBase):
    current_version_id = 1
    # This should get reset by the FS initialization.
    fs = None

    def str_to_node(self, s):
        # Convert a stored path string back into a Node, resolving
        # drives and making relative paths absolute under Top.
        top = self.fs.Top
        root = top.root
        if do_splitdrive:
            drive, s = _my_splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            s = top.labspath + '/' + s
        return root._lookup_abs(s, Entry)
class DirBuildInfo(SCons.Node.BuildInfoBase):
    current_version_id = 1
# Characters that give a pathname glob (wildcard) semantics.
glob_magic_check = re.compile('[*?[]')

def has_glob_magic(s):
    """Return True when *s* contains a glob wildcard ('*', '?' or '[')."""
    return bool(glob_magic_check.search(s))
class Dir(Base):
    """A class for directories in a file system.
    """

    memoizer_counters = []

    NodeInfo = DirNodeInfo
    BuildInfo = DirBuildInfo
    def __init__(self, name, directory, fs):
        if __debug__: logInstanceCreation(self, 'Node.FS.Dir')
        Base.__init__(self, name, directory, fs)
        self._morph()
    def _morph(self):
        """Turn a file system Node (either a freshly initialized directory
        object or a separate Entry object) into a proper directory object.

        Set up this directory's entries and hook it into the file
        system tree.  Specify that directories (this Node) don't use
        signatures for calculating whether they're current.
        """
        self.repositories = []
        self.srcdir = None

        self.entries = {}
        self.entries['.'] = self
        self.entries['..'] = self.dir
        self.cwd = self
        self.searched = 0
        self._sconsign = None
        self.variant_dirs = []
        self.root = self.dir.root

        # For directories, we make a difference between the directory
        # 'name' and the directory 'dirname'. The 'name' attribute is
        # used when we need to print the 'name' of the directory or
        # when it is used as the last part of a path. The 'dirname'
        # is used when the directory is not the last element of the
        # path. The main reason for making that distinction is that
        # for RootDir's the dirname can not be easily inferred from
        # the name. For example, we have to add a '/' after a drive
        # letter but not after a UNC path prefix ('//').
        self.dirname = self.name + OS_SEP

        # Don't just reset the executor, replace its action list,
        # because it might have some pre- or post-actions that need to
        # be preserved.
        #
        # But don't reset the executor if there is a non-null executor
        # attached already. The existing executor might have other
        # targets, in which case replacing the action list with a
        # Mkdir action is a big mistake.
        if not hasattr(self, 'executor'):
            self.builder = get_MkdirBuilder()
            self.get_executor().set_action_list(self.builder.action)
        else:
            # Prepend MkdirBuilder action to existing action list
            l = self.get_executor().action_list
            a = get_MkdirBuilder().action
            l.insert(0, a)
            self.get_executor().set_action_list(l)
    def diskcheck_match(self):
        # Complain if a regular file exists where this Dir is expected.
        diskcheck_match(self, self.isfile,
                        "File %s found where directory expected.")
    def __clearRepositoryCache(self, duplicate=None):
        """Called when we change the repository(ies) for a directory.
        This clears any cached information that is invalidated by changing
        the repository."""
        for node in self.entries.values():
            if node != self.dir:
                if node != self and isinstance(node, Dir):
                    # Recurse into child directories.
                    node.__clearRepositoryCache(duplicate)
                else:
                    node.clear()
                    try:
                        del node._srcreps
                    except AttributeError:
                        pass
                    if duplicate is not None:
                        node.duplicate=duplicate
    def __resetDuplicate(self, node):
        # Re-derive a child's duplicate setting from its directory.
        if node != self:
            node.duplicate = node.get_dir().duplicate
    def Entry(self, name):
        """
        Looks up or creates an entry node named 'name' relative to
        this directory.
        """
        return self.fs.Entry(name, self)
    def Dir(self, name, create=True):
        """
        Looks up or creates a directory node named 'name' relative to
        this directory.
        """
        return self.fs.Dir(name, self, create)
    def File(self, name):
        """
        Looks up or creates a file node named 'name' relative to
        this directory.
        """
        return self.fs.File(name, self)
    def link(self, srcdir, duplicate):
        """Set this directory as the variant directory for the
        supplied source directory."""
        self.srcdir = srcdir
        self.duplicate = duplicate
        # Changing the source dir invalidates cached repository info.
        self.__clearRepositoryCache(duplicate)
        srcdir.variant_dirs.append(self)
    def getRepositories(self):
        """Returns a list of repositories for this directory.
        """
        # A non-duplicating variant dir also searches its source dir
        # (and that dir's repositories) as if they were repositories.
        if self.srcdir and not self.duplicate:
            return self.srcdir.get_all_rdirs() + self.repositories
        return self.repositories
    memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs'))

    def get_all_rdirs(self):
        # Return this Dir plus every Repository Dir that backs it,
        # accumulated while walking up the tree.  Memoized; a fresh
        # list copy is returned so callers can't mutate the cache.
        try:
            return list(self._memo['get_all_rdirs'])
        except KeyError:
            pass

        result = [self]
        fname = '.'
        dir = self
        while dir:
            for rep in dir.getRepositories():
                result.append(rep.Dir(fname))
            # Extend the relative name as we climb toward the root.
            if fname == '.':
                fname = dir.name
            else:
                fname = dir.name + OS_SEP + fname
            dir = dir.up()

        self._memo['get_all_rdirs'] = list(result)

        return result
    def addRepository(self, dir):
        # Register a Repository dir (once), and invalidate caches.
        if dir != self and not dir in self.repositories:
            self.repositories.append(dir)
            dir.tpath = '.'
            self.__clearRepositoryCache()
    def up(self):
        # Parent directory (self for the root, per RootDir).
        return self.dir
    def _rel_path_key(self, other):
        # Memoization key for rel_path().
        return str(other)

    memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key))

    def rel_path(self, other):
        """Return a path to "other" relative to this directory.
        """
        # This complicated and expensive method, which constructs relative
        # paths between arbitrary Node.FS objects, is no longer used
        # by SCons itself.  It was introduced to store dependency paths
        # in .sconsign files relative to the target, but that ended up
        # being significantly inefficient.
        #
        # We're continuing to support the method because some SConstruct
        # files out there started using it when it was available, and
        # we're all about backwards compatibility..

        try:
            memo_dict = self._memo['rel_path']
        except KeyError:
            memo_dict = {}
            self._memo['rel_path'] = memo_dict
        else:
            try:
                return memo_dict[other]
            except KeyError:
                pass

        if self is other:
            result = '.'
        elif not other in self.path_elements:
            # other is not below us: recurse via its directory.
            try:
                other_dir = other.get_dir()
            except AttributeError:
                result = str(other)
            else:
                if other_dir is None:
                    result = other.name
                else:
                    dir_rel_path = self.rel_path(other_dir)
                    if dir_rel_path == '.':
                        result = other.name
                    else:
                        result = dir_rel_path + OS_SEP + other.name
        else:
            # other is an ancestor: climb with '..' components.
            i = self.path_elements.index(other) + 1

            path_elems = ['..'] * (len(self.path_elements) - i) \
                         + [n.name for n in other.path_elements[i:]]

            result = OS_SEP.join(path_elems)

        memo_dict[other] = result

        return result
    def get_env_scanner(self, env, kw={}):
        # Directories always use the default directory-entry scanner.
        import SCons.Defaults
        return SCons.Defaults.DirEntryScanner
    def get_target_scanner(self):
        # Same scanner as get_env_scanner(); directories are scanned
        # by enumerating their entries.
        import SCons.Defaults
        return SCons.Defaults.DirEntryScanner
    def get_found_includes(self, env, scanner, path):
        """Return this directory's implicit dependencies.

        We don't bother caching the results because the scan typically
        shouldn't be requested more than once (as opposed to scanning
        .h file contents, which can be requested as many times as the
        files is #included by other files).
        """
        if not scanner:
            return []
        # Clear cached info for this Dir.  If we already visited this
        # directory on our walk down the tree (because we didn't know at
        # that point it was being used as the source for another Node)
        # then we may have calculated build signature before realizing
        # we had to scan the disk.  Now that we have to, though, we need
        # to invalidate the old calculated signature so that any node
        # dependent on our directory structure gets one that includes
        # info about everything on disk.
        self.clear()
        return scanner(self, env, path)
    #
    # Taskmaster interface subsystem
    #

    def prepare(self):
        # Nothing to prepare for a directory node.
        pass
    def build(self, **kw):
        """A null "builder" for directories."""
        global MkdirBuilder
        # Only build when a real (non-Mkdir) builder was attached.
        if self.builder is not MkdirBuilder:
            SCons.Node.Node.build(self, **kw)

    #
    #
    #
    def _create(self):
        """Create this directory, silently and without worrying about
        whether the builder is the default or not."""
        # Collect the chain of not-yet-existing parents, topmost last.
        listDirs = []
        parent = self
        while parent:
            if parent.exists():
                break
            listDirs.append(parent)
            p = parent.up()
            if p is None:
                # Don't use while: - else: for this condition because
                # if so, then parent is None and has no .path attribute.
                raise SCons.Errors.StopError(parent.path)
            parent = p
        listDirs.reverse()
        for dirnode in listDirs:
            try:
                # Don't call dirnode.build(), call the base Node method
                # directly because we definitely *must* create this
                # directory.  The dirnode.build() method will suppress
                # the build if it's the default builder.
                SCons.Node.Node.build(dirnode)
                dirnode.get_executor().nullify()
                # The build() action may or may not have actually
                # created the directory, depending on whether the -n
                # option was used or not.  Delete the _exists and
                # _rexists attributes so they can be reevaluated.
                dirnode.clear()
            except OSError:
                pass
    def multiple_side_effect_has_builder(self):
        global MkdirBuilder
        # The implicit MkdirBuilder doesn't count as a "real" builder.
        return self.builder is not MkdirBuilder and self.has_builder()
    def alter_targets(self):
        """Return any corresponding targets in a variant directory.
        """
        return self.fs.variant_dir_target_climb(self, self, [])
    def scanner_key(self):
        """A directory does not get scanned."""
        return None
    def get_text_contents(self):
        """We already emit things in text, so just return the binary
        version."""
        return self.get_contents()
    def get_contents(self):
        """Return content signatures and names of all our children
        separated by new-lines.  Ensure that the nodes are sorted."""
        contents = []
        # Sort by name so the aggregate signature is deterministic.
        for node in sorted(self.children(), key=lambda t: t.name):
            contents.append('%s %s\n' % (node.get_csig(), node.name))
        return ''.join(contents)
def get_csig(self):
"""Compute the content signature for Directory nodes. In
general, this is not needed and the content signature is not
stored in the DirNodeInfo. However, if get_contents on a Dir
node is called which has a child directory, the child
directory should return the hash of its contents."""
contents = self.get_contents()
return SCons.Util.MD5signature(contents)
    def do_duplicate(self, src):
        # Directories are never duplicated from a source dir; no-op.
        pass
    # Up-to-date decisions for directories use the base Node's
    # state-based decider.
    changed_since_last_build = SCons.Node.Node.state_has_changed
def is_up_to_date(self):
"""If any child is not up-to-date, then this directory isn't,
either."""
if self.builder is not MkdirBuilder and not self.exists():
return 0
up_to_date = SCons.Node.up_to_date
for kid in self.children():
if kid.get_state() > up_to_date:
return 0
return 1
    def rdir(self):
        """Return the first existing counterpart of this directory found
        in the Repository search path, or self if none is found."""
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try: node = dir.entries[norm_name]
                except KeyError: node = dir.dir_on_disk(self.name)
                if node and node.exists() and \
                    (isinstance(dir, Dir) or isinstance(dir, Entry)):
                    return node
        return self
def sconsign(self):
"""Return the .sconsign file info for this directory,
creating it first if necessary."""
if not self._sconsign:
import SCons.SConsign
self._sconsign = SCons.SConsign.ForDirectory(self)
return self._sconsign
def srcnode(self):
"""Dir has a special need for srcnode()...if we
have a srcdir attribute set, then that *is* our srcnode."""
if self.srcdir:
return self.srcdir
return Base.srcnode(self)
def get_timestamp(self):
"""Return the latest timestamp from among our children"""
stamp = 0
for kid in self.children():
if kid.get_timestamp() > stamp:
stamp = kid.get_timestamp()
return stamp
def entry_abspath(self, name):
return self.abspath + OS_SEP + name
def entry_labspath(self, name):
return self.labspath + '/' + name
def entry_path(self, name):
return self.path + OS_SEP + name
def entry_tpath(self, name):
return self.tpath + OS_SEP + name
    def entry_exists_on_disk(self, name):
        """Return whether entry `name` exists on disk in this directory,
        using (and lazily building) a cached snapshot of os.listdir()."""
        try:
            d = self.on_disk_entries
        except AttributeError:
            # First call: snapshot the directory listing into a dict of
            # normalized names.
            d = {}
            try:
                entries = os.listdir(self.abspath)
            except OSError:
                pass
            else:
                for entry in map(_my_normcase, entries):
                    d[entry] = True
            self.on_disk_entries = d
        if sys.platform == 'win32':
            name = _my_normcase(name)
            result = d.get(name)
            if result is None:
                # Belt-and-suspenders for Windows: check directly for
                # 8.3 file names that don't show up in os.listdir().
                result = os.path.exists(self.abspath + OS_SEP + name)
                d[name] = result
            return result
        else:
            return name in d
    memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list'))
    def srcdir_list(self):
        """Return (memoized) the source directories mapped to this
        directory via VariantDir(), climbing from self to the root."""
        try:
            return self._memo['srcdir_list']
        except KeyError:
            pass
        result = []
        dirname = '.'
        dir = self
        while dir:
            if dir.srcdir:
                result.append(dir.srcdir.Dir(dirname))
            # Accumulate the relative path as we climb.
            dirname = dir.name + OS_SEP + dirname
            dir = dir.up()
        self._memo['srcdir_list'] = result
        return result
    def srcdir_duplicate(self, name):
        """Look for `name` in our source directories; if found, either
        duplicate it into this directory (when duplication is on) and
        return the copy, or return the source node itself.  Returns
        None when no source directory provides `name`."""
        for dir in self.srcdir_list():
            if self.is_under(dir):
                # We shouldn't source from something in the build path;
                # variant_dir is probably under src_dir, in which case
                # we are reflecting.
                break
            if dir.entry_exists_on_disk(name):
                srcnode = dir.Entry(name).disambiguate()
                if self.duplicate:
                    node = self.Entry(name).disambiguate()
                    node.do_duplicate(srcnode)
                    return node
                else:
                    return srcnode
        return None
    def _srcdir_find_file_key(self, filename):
        # Memoization key for srcdir_find_file(): the filename itself.
        return filename
    memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key))
    def srcdir_find_file(self, filename):
        """Search this directory's repository path, then its source
        directories' repository paths, for `filename`.  Returns a
        (node, directory) tuple, or (None, None).  Memoized."""
        try:
            memo_dict = self._memo['srcdir_find_file']
        except KeyError:
            memo_dict = {}
            self._memo['srcdir_find_file'] = memo_dict
        else:
            try:
                return memo_dict[filename]
            except KeyError:
                pass
        def func(node):
            # Accept only File/Entry nodes that are derived or exist.
            if (isinstance(node, File) or isinstance(node, Entry)) and \
                (node.is_derived() or node.exists()):
                return node
            return None
        norm_name = _my_normcase(filename)
        for rdir in self.get_all_rdirs():
            try: node = rdir.entries[norm_name]
            except KeyError: node = rdir.file_on_disk(filename)
            else: node = func(node)
            if node:
                result = (node, self)
                memo_dict[filename] = result
                return result
        for srcdir in self.srcdir_list():
            for rdir in srcdir.get_all_rdirs():
                try: node = rdir.entries[norm_name]
                except KeyError: node = rdir.file_on_disk(filename)
                else: node = func(node)
                if node:
                    # Found via a source dir: return a local File node.
                    result = (File(filename, self, self.fs), srcdir)
                    memo_dict[filename] = result
                    return result
        result = (None, None)
        memo_dict[filename] = result
        return result
def dir_on_disk(self, name):
if self.entry_exists_on_disk(name):
try: return self.Dir(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, File):
return None
return node
def file_on_disk(self, name):
if self.entry_exists_on_disk(name) or \
diskcheck_rcs(self, name) or \
diskcheck_sccs(self, name):
try: return self.File(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, Dir):
return None
return node
def walk(self, func, arg):
"""
Walk this directory tree by calling the specified function
for each directory in the tree.
This behaves like the os.path.walk() function, but for in-memory
Node.FS.Dir objects. The function takes the same arguments as
the functions passed to os.path.walk():
func(arg, dirname, fnames)
Except that "dirname" will actually be the directory *Node*,
not the string. The '.' and '..' entries are excluded from
fnames. The fnames list may be modified in-place to filter the
subdirectories visited or otherwise impose a specific order.
The "arg" argument is always passed to func() and may be used
in any way (or ignored, passing None is common).
"""
entries = self.entries
names = list(entries.keys())
names.remove('.')
names.remove('..')
func(arg, self, names)
for dirname in [n for n in names if isinstance(entries[n], Dir)]:
entries[dirname].walk(func, arg)
def glob(self, pathname, ondisk=True, source=False, strings=False):
"""
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
By defafult, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
"""
dirname, basename = os.path.split(pathname)
if not dirname:
return sorted(self._glob1(basename, ondisk, source, strings),
key=lambda t: str(t))
if has_glob_magic(dirname):
list = self.glob(dirname, ondisk, source, strings=False)
else:
list = [self.Dir(dirname, create=True)]
result = []
for dir in list:
r = dir._glob1(basename, ondisk, source, strings)
if strings:
r = [os.path.join(str(dir), x) for x in r]
result.extend(r)
return sorted(result, key=lambda a: str(a))
    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        """
        Globs for and returns a list of entry names matching a single
        pattern in this directory.
        This searches any repositories and source directories for
        corresponding entries and returns a Node (or string) relative
        to the current directory if an entry is found anywhere.
        TODO: handle pattern with no wildcard
        """
        # Search our repository path plus every source directory's
        # repository path.
        search_dir_list = self.get_all_rdirs()
        for srcdir in self.srcdir_list():
            search_dir_list.extend(srcdir.get_all_rdirs())
        selfEntry = self.Entry
        names = []
        for dir in search_dir_list:
            # We use the .name attribute from the Node because the keys of
            # the dir.entries dictionary are normalized (that is, all upper
            # case) on case-insensitive systems like Windows.
            node_names = [ v.name for k, v in dir.entries.items()
                           if k not in ('.', '..') ]
            names.extend(node_names)
            if not strings:
                # Make sure the working directory (self) actually has
                # entries for all Nodes in repositories or variant dirs.
                for name in node_names: selfEntry(name)
            if ondisk:
                try:
                    disk_names = os.listdir(dir.abspath)
                except os.error:
                    continue
                names.extend(disk_names)
                if not strings:
                    # We're going to return corresponding Nodes in
                    # the local directory, so we need to make sure
                    # those Nodes exist.  We only want to create
                    # Nodes for the entries that will match the
                    # specified pattern, though, which means we
                    # need to filter the list here, even though
                    # the overall list will also be filtered later,
                    # after we exit this loop.
                    if pattern[0] != '.':
                        #disk_names = [ d for d in disk_names if d[0] != '.' ]
                        disk_names = [x for x in disk_names if x[0] != '.']
                    disk_names = fnmatch.filter(disk_names, pattern)
                    dirEntry = dir.Entry
                    for name in disk_names:
                        # Add './' before disk filename so that '#' at
                        # beginning of filename isn't interpreted.
                        name = './' + name
                        node = dirEntry(name).disambiguate()
                        n = selfEntry(name)
                        if n.__class__ != node.__class__:
                            n.__class__ = node.__class__
                            n._morph()
        # De-duplicate, apply dot-file convention and the pattern.
        names = set(names)
        if pattern[0] != '.':
            #names = [ n for n in names if n[0] != '.' ]
            names = [x for x in names if x[0] != '.']
        names = fnmatch.filter(names, pattern)
        if strings:
            return names
        #return [ self.entries[_my_normcase(n)] for n in names ]
        return [self.entries[_my_normcase(n)] for n in names]
class RootDir(Dir):
    """A class for the root directory of a file system.
    This is the same as a Dir class, except that the path separator
    ('/' or '\\') is actually part of the name, so we don't need to
    add a separator when creating the path names of entries within
    this directory.
    """
    def __init__(self, drive, fs):
        """Initialize the root for `drive`: '' for the POSIX/default
        root, '//' for a UNC root, or a Windows drive letter."""
        if __debug__: logInstanceCreation(self, 'Node.FS.RootDir')
        # We're going to be our own parent directory (".." entry and .dir
        # attribute) so we have to set up some values so Base.__init__()
        # won't gag when it calls some of our methods.
        self.abspath = ''
        self.labspath = ''
        self.path = ''
        self.tpath = ''
        self.path_elements = []
        self.duplicate = 0
        self.root = self
        # Handle all the types of drives:
        if drive == '':
            # No drive, regular UNIX root or Windows default drive.
            name = OS_SEP
            dirname = OS_SEP
        elif drive == '//':
            # UNC path
            name = UNC_PREFIX
            dirname = UNC_PREFIX
        else:
            # Windows drive letter
            name = drive
            dirname = drive + OS_SEP
        Base.__init__(self, name, self, fs)
        # Now set our paths to what we really want them to be. The
        # name should already contain any necessary separators, such
        # as the initial drive letter (the name) plus the directory
        # separator, except for the "lookup abspath," which does not
        # have the drive letter.
        self.abspath = dirname
        self.labspath = ''
        self.path = dirname
        self.tpath = dirname
        self._morph()
        # Must be reset after Dir._morph() is invoked...
        self.dirname = dirname
        # Cache of every Node under this root, keyed by normalized
        # absolute path; seeded with the root's own spellings.
        self._lookupDict = {}
        self._lookupDict[''] = self
        self._lookupDict['/'] = self
        # The // entry is necessary because os.path.normpath()
        # preserves double slashes at the beginning of a path on Posix
        # platforms.
        if not has_unc:
            self._lookupDict['//'] = self
    def must_be_same(self, klass):
        # The root can always stand in for a Dir.
        if klass is Dir:
            return
        Base.must_be_same(self, klass)
    def _lookup_abs(self, p, klass, create=1):
        """
        Fast (?) lookup of a *normalized* absolute path.
        This method is intended for use by internal lookups with
        already-normalized path data. For general-purpose lookups,
        use the FS.Entry(), FS.Dir() or FS.File() methods.
        The caller is responsible for making sure we're passed a
        normalized absolute path; we merely let Python's dictionary look
        up and return the One True Node.FS object for the path.
        If a Node for the specified "p" doesn't already exist, and
        "create" is specified, the Node may be created after recursive
        invocation to find or create the parent directory or directories.
        """
        k = _my_normcase(p)
        try:
            result = self._lookupDict[k]
        except KeyError:
            if not create:
                msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
                raise SCons.Errors.UserError(msg)
            # There is no Node for this path name, and we're allowed
            # to create it.
            # (note: would like to use p.rsplit('/',1) here but
            # that's not in python 2.3)
            # e.g.: dir_name, file_name = p.rsplit('/',1)
            # Use rfind(), not rindex(): rindex() raises ValueError
            # when there is no '/', which made the fallback branch
            # below unreachable.
            last_slash = p.rfind('/')
            if last_slash >= 0:
                dir_name = p[:last_slash]
                file_name = p[last_slash+1:]
            else:
                dir_name = p         # shouldn't happen, just in case
                file_name = ''
            dir_node = self._lookup_abs(dir_name, Dir)
            result = klass(file_name, dir_node, self.fs)
            # Double-check on disk (as configured) that the Node we
            # created matches whatever is out there in the real world.
            result.diskcheck_match()
            self._lookupDict[k] = result
            dir_node.entries[_my_normcase(file_name)] = result
            dir_node.implicit = None
        else:
            # There is already a Node for this path name. Allow it to
            # complain if we were looking for an inappropriate type.
            result.must_be_same(klass)
        return result
    def __str__(self):
        return self.abspath
    def entry_abspath(self, name):
        # The root's abspath already ends with the separator.
        return self.abspath + name
    def entry_labspath(self, name):
        return '/' + name
    def entry_path(self, name):
        return self.path + name
    def entry_tpath(self, name):
        return self.tpath + name
    def is_under(self, dir):
        if self is dir:
            return 1
        else:
            return 0
    def up(self):
        # The root is its own top; there is no parent directory.
        return None
    def get_dir(self):
        return None
    def src_builder(self):
        return _null
class FileNodeInfo(SCons.Node.NodeInfoBase):
    """Per-File node state (content signature, timestamp, size) stored
    in the .sconsign database."""
    current_version_id = 1
    field_list = ['csig', 'timestamp', 'size']
    # This should get reset by the FS initialization.
    fs = None
    def str_to_node(self, s):
        """Convert a stored path string back into a Node, resolving any
        drive letter and anchoring relative paths at the top dir."""
        top_dir = self.fs.Top
        root = top_dir.root
        if do_splitdrive:
            drive, s = _my_splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            s = top_dir.labspath + '/' + s
        return root._lookup_abs(s, Entry)
class FileBuildInfo(SCons.Node.BuildInfoBase):
    """Build information (sources, dependencies, action signature) for a
    File node, in both in-memory and .sconsign-serializable forms."""
    current_version_id = 1
    def convert_to_sconsign(self):
        """
        Converts this FileBuildInfo object for writing to a .sconsign file
        This replaces each Node in our various dependency lists with its
        usual string representation: relative to the top-level SConstruct
        directory, or an absolute path if it's outside.
        """
        if os_sep_is_slash:
            node_to_str = str
        else:
            # Normalize the platform separator to '/' so .sconsign
            # files are portable across platforms.
            def node_to_str(n):
                try:
                    s = n.path
                except AttributeError:
                    s = str(n)
                else:
                    s = s.replace(OS_SEP, '/')
                return s
        for attr in ['bsources', 'bdepends', 'bimplicit']:
            try:
                val = getattr(self, attr)
            except AttributeError:
                pass
            else:
                setattr(self, attr, list(map(node_to_str, val)))
    def convert_from_sconsign(self, dir, name):
        """
        Converts a newly-read FileBuildInfo object for in-SCons use
        For normal up-to-date checking, we don't have any conversion to
        perform--but we're leaving this method here to make that clear.
        """
        pass
    def prepare_dependencies(self):
        """
        Prepares a FileBuildInfo object for explaining what changed
        The bsources, bdepends and bimplicit lists have all been
        stored on disk as paths relative to the top-level SConstruct
        directory.  Convert the strings to actual Nodes (for use by the
        --debug=explain code and --implicit-cache).
        """
        attrs = [
            ('bsources', 'bsourcesigs'),
            ('bdepends', 'bdependsigs'),
            ('bimplicit', 'bimplicitsigs'),
        ]
        for (nattr, sattr) in attrs:
            try:
                strings = getattr(self, nattr)
                nodeinfos = getattr(self, sattr)
            except AttributeError:
                continue
            nodes = []
            for s, ni in zip(strings, nodeinfos):
                if not isinstance(s, SCons.Node.Node):
                    # Stored as a string: let the NodeInfo resolve it.
                    s = ni.str_to_node(s)
                nodes.append(s)
            setattr(self, nattr, nodes)
    def format(self, names=0):
        """Return a human-readable dump of the stored dependencies and
        their signatures, one per line, ending with the action line."""
        result = []
        bkids = self.bsources + self.bdepends + self.bimplicit
        bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs
        for bkid, bkidsig in zip(bkids, bkidsigs):
            result.append(str(bkid) + ': ' +
                          ' '.join(bkidsig.format(names=names)))
        result.append('%s [%s]' % (self.bactsig, self.bact))
        return '\n'.join(result)
class File(Base):
    """A class for files in a file system.
    """
    # Counters used by the SCons.Memoize bookkeeping below.
    memoizer_counters = []
    NodeInfo = FileNodeInfo
    BuildInfo = FileBuildInfo
    # Chunk size used when hashing file contents (multiplied by 1024
    # where it is passed to MD5filesignature()).
    md5_chunksize = 64
    def diskcheck_match(self):
        # Complain (per the configured disk checks) if a directory is
        # found on disk where this File node is expected.
        diskcheck_match(self, self.isdir,
                        "Directory %s found where file expected.")
    def __init__(self, name, directory, fs):
        if __debug__: logInstanceCreation(self, 'Node.FS.File')
        Base.__init__(self, name, directory, fs)
        self._morph()
def Entry(self, name):
"""Create an entry node named 'name' relative to
the directory of this file."""
return self.dir.Entry(name)
def Dir(self, name, create=True):
"""Create a directory node named 'name' relative to
the directory of this file."""
return self.dir.Dir(name, create=create)
def Dirs(self, pathlist):
"""Create a list of directories relative to the SConscript
directory of this file."""
return [self.Dir(p) for p in pathlist]
def File(self, name):
"""Create a file node named 'name' relative to
the directory of this file."""
return self.dir.File(name)
    #def generate_build_dict(self):
    #    """Return an appropriate dictionary of values for building
    #    this File."""
    #    return {'Dir' : self.Dir,
    #            'File' : self.File,
    #            'RDirs' : self.RDirs}
    def _morph(self):
        """Turn a file system node into a File object."""
        self.scanner_paths = {}
        if not hasattr(self, '_local'):
            # Whether a local copy should be made of a repository file.
            self._local = 0
        # If there was already a Builder set on this entry, then
        # we need to make sure we call the target-decider function,
        # not the source-decider.  Reaching in and doing this by hand
        # is a little bogus.  We'd prefer to handle this by adding
        # an Entry.builder_set() method that disambiguates like the
        # other methods, but that starts running into problems with the
        # fragile way we initialize Dir Nodes with their Mkdir builders,
        # yet still allow them to be overridden by the user.  Since it's
        # not clear right now how to fix that, stick with what works
        # until it becomes clear...
        if self.has_builder():
            self.changed_since_last_build = self.decide_target
def scanner_key(self):
return self.get_suffix()
def get_contents(self):
if not self.rexists():
return ''
fname = self.rfile().abspath
try:
contents = open(fname, "rb").read()
except EnvironmentError, e:
if not e.filename:
e.filename = fname
raise
return contents
# This attempts to figure out what the encoding of the text is
# based upon the BOM bytes, and then decodes the contents so that
# it's a valid python string.
def get_text_contents(self):
contents = self.get_contents()
# The behavior of various decode() methods and functions
# w.r.t. the initial BOM bytes is different for different
# encodings and/or Python versions. ('utf-8' does not strip
# them, but has a 'utf-8-sig' which does; 'utf-16' seems to
# strip them; etc.) Just sidestep all the complication by
# explicitly stripping the BOM before we decode().
if contents.startswith(codecs.BOM_UTF8):
return contents[len(codecs.BOM_UTF8):].decode('utf-8')
if contents.startswith(codecs.BOM_UTF16_LE):
return contents[len(codecs.BOM_UTF16_LE):].decode('utf-16-le')
if contents.startswith(codecs.BOM_UTF16_BE):
return contents[len(codecs.BOM_UTF16_BE):].decode('utf-16-be')
return contents
def get_content_hash(self):
"""
Compute and return the MD5 hash for this file.
"""
if not self.rexists():
return SCons.Util.MD5signature('')
fname = self.rfile().abspath
try:
cs = SCons.Util.MD5filesignature(fname,
chunksize=SCons.Node.FS.File.md5_chunksize*1024)
except EnvironmentError, e:
if not e.filename:
e.filename = fname
raise
return cs
memoizer_counters.append(SCons.Memoize.CountValue('get_size'))
def get_size(self):
try:
return self._memo['get_size']
except KeyError:
pass
if self.rexists():
size = self.rfile().getsize()
else:
size = 0
self._memo['get_size'] = size
return size
memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp'))
def get_timestamp(self):
try:
return self._memo['get_timestamp']
except KeyError:
pass
if self.rexists():
timestamp = self.rfile().getmtime()
else:
timestamp = 0
self._memo['get_timestamp'] = timestamp
return timestamp
    def store_info(self):
        # Merge our build information into the already-stored entry.
        # This accommodates "chained builds" where a file that's a target
        # in one build (SConstruct file) is a source in a different build.
        # See test/chained-build.py for the use case.
        if do_store_info:
            self.dir.sconsign().store_info(self.name, self)
    # Attributes copied verbatim when converting an old-format
    # .sconsign entry (see convert_old_entry() below).
    convert_copy_attrs = [
        'bsources',
        'bimplicit',
        'bdepends',
        'bact',
        'bactsig',
        'ninfo',
    ]
    # Old-format "signature" list attributes that need per-entry
    # conversion into NodeInfo objects.
    convert_sig_attrs = [
        'bsourcesigs',
        'bimplicitsigs',
        'bdependsigs',
    ]
    def convert_old_entry(self, old_entry):
        """Convert a pre-refactoring .sconsign entry to the new format."""
        # Convert a .sconsign entry from before the Big Signature
        # Refactoring, doing what we can to convert its information
        # to the new .sconsign entry format.
        #
        # The old format looked essentially like this:
        #
        #   BuildInfo
        #       .ninfo (NodeInfo)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bsources
        #       .bsourcesigs ("signature" list)
        #       .bdepends
        #       .bdependsigs ("signature" list)
        #       .bimplicit
        #       .bimplicitsigs ("signature" list)
        #       .bact
        #       .bactsig
        #
        # The new format looks like this:
        #
        #   .ninfo (NodeInfo)
        #       .bsig
        #       .csig
        #       .timestamp
        #       .size
        #   .binfo (BuildInfo)
        #       .bsources
        #       .bsourcesigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bdepends
        #       .bdependsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bimplicit
        #       .bimplicitsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bact
        #       .bactsig
        #
        # The basic idea of the new structure is that a NodeInfo always
        # holds all available information about the state of a given Node
        # at a certain point in time.  The various .b*sigs lists can just
        # be a list of pointers to the .ninfo attributes of the different
        # dependent nodes, without any copying of information until it's
        # time to pickle it for writing out to a .sconsign file.
        #
        # The complicating issue is that the *old* format only stored one
        # "signature" per dependency, based on however the *last* build
        # was configured.  We don't know from just looking at it whether
        # it was a build signature, a content signature, or a timestamp
        # "signature".  Since we no longer use build signatures, the
        # best we can do is look at the length and if it's thirty two,
        # assume that it was (or might have been) a content signature.
        # If it was actually a build signature, then it will cause a
        # rebuild anyway when it doesn't match the new content signature,
        # but that's probably the best we can do.
        import SCons.SConsign
        new_entry = SCons.SConsign.SConsignEntry()
        new_entry.binfo = self.new_binfo()
        binfo = new_entry.binfo
        # Copy over the attributes that keep their old representation.
        for attr in self.convert_copy_attrs:
            try:
                value = getattr(old_entry, attr)
            except AttributeError:
                continue
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        # Re-wrap old "signature" strings in fresh NodeInfo objects.
        for attr in self.convert_sig_attrs:
            try:
                sig_list = getattr(old_entry, attr)
            except AttributeError:
                continue
            value = []
            for sig in sig_list:
                ninfo = self.new_ninfo()
                if len(sig) == 32:
                    ninfo.csig = sig
                else:
                    ninfo.timestamp = sig
                value.append(ninfo)
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        return new_entry
    memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info'))
    def get_stored_info(self):
        """Return (memoized) this file's .sconsign entry, creating an
        empty one if none is stored, and converting any old-format
        entry on the fly."""
        try:
            return self._memo['get_stored_info']
        except KeyError:
            pass
        try:
            sconsign_entry = self.dir.sconsign().get_entry(self.name)
        except (KeyError, EnvironmentError):
            # Nothing stored (or the .sconsign file is unreadable):
            # fabricate a fresh, empty entry.
            import SCons.SConsign
            sconsign_entry = SCons.SConsign.SConsignEntry()
            sconsign_entry.binfo = self.new_binfo()
            sconsign_entry.ninfo = self.new_ninfo()
        else:
            if isinstance(sconsign_entry, FileBuildInfo):
                # This is a .sconsign file from before the Big Signature
                # Refactoring; convert it as best we can.
                sconsign_entry = self.convert_old_entry(sconsign_entry)
            try:
                # Build signatures are no longer used; drop any stored one.
                delattr(sconsign_entry.ninfo, 'bsig')
            except AttributeError:
                pass
        self._memo['get_stored_info'] = sconsign_entry
        return sconsign_entry
def get_stored_implicit(self):
binfo = self.get_stored_info().binfo
binfo.prepare_dependencies()
try: return binfo.bimplicit
except AttributeError: return None
def rel_path(self, other):
return self.dir.rel_path(other)
    def _get_found_includes_key(self, env, scanner, path):
        # Memoization key: identities of env and scanner plus the path.
        return (id(env), id(scanner), path)
    memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key))
    def get_found_includes(self, env, scanner, path):
        """Return the included implicit dependencies in this file.
        Cache results so we only scan the file once per path
        regardless of how many times this information is requested.
        """
        memo_key = (id(env), id(scanner), path)
        try:
            memo_dict = self._memo['get_found_includes']
        except KeyError:
            memo_dict = {}
            self._memo['get_found_includes'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        if scanner:
            # result = [n.disambiguate() for n in scanner(self, env, path)]
            result = scanner(self, env, path)
            result = [N.disambiguate() for N in result]
        else:
            # No scanner for this file type: no implicit dependencies.
            result = []
        memo_dict[memo_key] = result
        return result
    def _createDir(self):
        # ensure that the directories for this node are
        # created.
        self.dir._create()
    def push_to_cache(self):
        """Try to push the node into a cache
        """
        # This should get called before the Nodes' .built() method is
        # called, which would clear the build signature if the file has
        # a source scanner.
        #
        # We have to clear the local memoized values *before* we push
        # the node to cache so that the memoization of the self.exists()
        # return value doesn't interfere.
        if self.nocache:
            return
        self.clear_memoized_values()
        if self.exists():
            self.get_build_env().get_CacheDir().push(self)
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true if the node was successfully retrieved.
"""
if self.nocache:
return None
if not self.is_derived():
return None
return self.get_build_env().get_CacheDir().retrieve(self)
    def visited(self):
        """Record this node's current state (csig/timestamp/size) after
        it has been visited, optionally pushing it to the cache."""
        if self.exists():
            self.get_build_env().get_CacheDir().push_if_forced(self)
        ninfo = self.get_ninfo()
        csig = self.get_max_drift_csig()
        if csig:
            ninfo.csig = csig
        ninfo.timestamp = self.get_timestamp()
        ninfo.size = self.get_size()
        if not self.has_builder():
            # This is a source file, but it might have been a target file
            # in another build that included more of the DAG.  Copy
            # any build information that's stored in the .sconsign file
            # into our binfo object so it doesn't get lost.
            old = self.get_stored_info()
            self.get_binfo().__dict__.update(old.binfo.__dict__)
        self.store_info()
    def find_src_builder(self):
        """Find (and attach) a transparent source-code builder (SCCS or
        RCS) for this file, or return None if it already exists or no
        such builder applies."""
        if self.rexists():
            return None
        scb = self.dir.src_builder()
        if scb is _null:
            if diskcheck_sccs(self.dir, self.name):
                scb = get_DefaultSCCSBuilder()
            elif diskcheck_rcs(self.dir, self.name):
                scb = get_DefaultRCSBuilder()
            else:
                scb = None
        if scb is not None:
            try:
                b = self.builder
            except AttributeError:
                b = None
            if b is None:
                self.builder_set(scb)
        return scb
def has_src_builder(self):
"""Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.sbuilder = self.find_src_builder()
return scb is not None
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
if self.is_derived():
return [], None
return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
    def _rmv_existing(self):
        # Remove an existing target file before rebuilding, invalidating
        # memoized state (e.g. the cached exists() result) first.
        self.clear_memoized_values()
        if print_duplicate:
            print "dup: removing existing target %s"%self
        e = Unlink(self, [], None)
        if isinstance(e, SCons.Errors.BuildError):
            raise e
    #
    # Taskmaster interface subsystem
    #
    def make_ready(self):
        # Resolve any transparent source builder and compute build info
        # before the Taskmaster schedules this node.
        self.has_src_builder()
        self.get_binfo()
    def prepare(self):
        """Prepare for this file to be created."""
        SCons.Node.Node.prepare(self)
        if self.get_state() != SCons.Node.up_to_date:
            if self.exists():
                # Remove a stale derived target unless it's precious.
                if self.is_derived() and not self.precious:
                    self._rmv_existing()
            else:
                try:
                    self._createDir()
                except SCons.Errors.StopError, drive:
                    desc = "No drive `%s' for target `%s'." % (drive, self)
                    raise SCons.Errors.StopError(desc)
#
#
#
def remove(self):
"""Remove this file."""
if self.exists() or self.islink():
self.fs.unlink(self.path)
return 1
return None
    def do_duplicate(self, src):
        """Duplicate (link or copy) `src` into this variant-dir node,
        creating the destination directory first."""
        self._createDir()
        if print_duplicate:
            print "dup: relinking variant '%s' from '%s'"%(self, src)
        # Remove any stale copy before linking the fresh one.
        Unlink(self, None, None)
        e = Link(self, src, None)
        if isinstance(e, SCons.Errors.BuildError):
            desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.path, e.errstr)
            raise SCons.Errors.StopError(desc)
        self.linked = 1
        # The Link() action may or may not have actually
        # created the file, depending on whether the -n
        # option was used or not.  Delete the _exists and
        # _rexists attributes so they can be reevaluated.
        self.clear()
    memoizer_counters.append(SCons.Memoize.CountValue('exists'))
    def exists(self):
        """Return whether this file exists (memoized), first duplicating
        it from its source directory if duplication is enabled."""
        try:
            return self._memo['exists']
        except KeyError:
            pass
        # Duplicate from source path if we are set up to do this.
        if self.duplicate and not self.is_derived() and not self.linked:
            src = self.srcnode()
            if src is not self:
                # At this point, src is meant to be copied in a variant directory.
                src = src.rfile()
                if src.abspath != self.abspath:
                    if src.exists():
                        self.do_duplicate(src)
                        # Can't return 1 here because the duplication might
                        # not actually occur if the -n option is being used.
                    else:
                        # The source file does not exist. Make sure no old
                        # copy remains in the variant directory.
                        if print_duplicate:
                            print "dup: no src for %s, unlinking old variant copy"%self
                        if Base.exists(self) or self.islink():
                            self.fs.unlink(self.path)
                        # Return None explicitly because the Base.exists() call
                        # above will have cached its value if the file existed.
                        self._memo['exists'] = None
                        return None
        result = Base.exists(self)
        self._memo['exists'] = result
        return result
    #
    # SIGNATURE SUBSYSTEM
    #
    def get_max_drift_csig(self):
        """
        Returns the content signature currently stored for this node
        if it's been unmodified longer than the max_drift value, or the
        max_drift value is 0.  Returns None otherwise.
        """
        old = self.get_stored_info()
        mtime = self.get_timestamp()
        max_drift = self.fs.max_drift
        if max_drift > 0:
            if (time.time() - mtime) > max_drift:
                try:
                    n = old.ninfo
                    # Trust the stored csig only if the stored timestamp
                    # still matches the file's current mtime.
                    if n.timestamp and n.csig and n.timestamp == mtime:
                        return n.csig
                except AttributeError:
                    pass
        elif max_drift == 0:
            try:
                return old.ninfo.csig
            except AttributeError:
                pass
        return None
    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content.
        node - the node
        cache - alternate node to use for the signature cache
        returns - the content signature
        """
        ninfo = self.get_ninfo()
        try:
            return ninfo.csig
        except AttributeError:
            pass
        csig = self.get_max_drift_csig()
        if csig is None:
            try:
                # NOTE(review): md5_chunksize is used as KB elsewhere
                # (md5_chunksize*1024 in get_content_hash) but compared
                # against a raw byte size here -- confirm intended.
                if self.get_size() < SCons.Node.FS.File.md5_chunksize:
                    contents = self.get_contents()
                else:
                    csig = self.get_content_hash()
            except IOError:
                # This can happen if there's actually a directory on-disk,
                # which can be the case if they've disabled disk checks,
                # or if an action with a File target actually happens to
                # create a same-named directory by mistake.
                csig = ''
            else:
                if not csig:
                    csig = SCons.Util.MD5signature(contents)
            # NOTE(review): a csig obtained from get_max_drift_csig() is
            # not cached on ninfo here (assignment is inside this
            # if-block) -- confirm that is intended.
            ninfo.csig = csig
        return csig
    #
    # DECISION SUBSYSTEM
    #
    def builder_set(self, builder):
        # Attaching a builder makes this node a target, so switch the
        # up-to-date decider from the source to the target variant.
        SCons.Node.Node.builder_set(self, builder)
        self.changed_since_last_build = self.decide_target
def changed_content(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
    def changed_state(self, target, prev_ni):
        # State decider: changed unless this node was marked up-to-date.
        return self.state != SCons.Node.up_to_date
    def changed_timestamp_then_content(self, target, prev_ni):
        # Cheap timestamp check first; only hash contents when the
        # timestamp actually differs.
        if not self.changed_timestamp_match(target, prev_ni):
            try:
                # Timestamp unchanged: reuse the previously stored csig.
                self.get_ninfo().csig = prev_ni.csig
            except AttributeError:
                pass
            return False
        return self.changed_content(target, prev_ni)
def changed_timestamp_newer(self, target, prev_ni):
try:
return self.get_timestamp() > target.get_timestamp()
except AttributeError:
return 1
def changed_timestamp_match(self, target, prev_ni):
    """Decider: changed when the current timestamp differs from the
    timestamp stored at the previous build."""
    try:
        current = self.get_timestamp()
        return current != prev_ni.timestamp
    except AttributeError:
        # No current or stored timestamp available: assume changed.
        return 1
def decide_source(self, target, prev_ni):
    # Delegate the "has this source changed?" decision to the
    # target's construction Environment (configurable via Decider()).
    return target.get_build_env().decide_source(self, target, prev_ni)

def decide_target(self, target, prev_ni):
    # Delegate the "has this target changed?" decision likewise.
    return target.get_build_env().decide_target(self, target, prev_ni)

# Initialize this Node's decider function to decide_source() because
# every file is a source file until it has a Builder attached...
changed_since_last_build = decide_source
def is_up_to_date(self):
    """Return whether this file is up to date, consulting the
    Repository list when the file does not exist locally."""
    T = 0  # flip to 1 for Trace() debugging of this method
    if T: Trace('is_up_to_date(%s):' % self)
    if not self.exists():
        if T: Trace(' not self.exists():')
        # The file doesn't exist locally...
        r = self.rfile()
        if r != self:
            # ...but there is one in a Repository...
            if not self.changed(r):
                if T: Trace(' changed(%s):' % r)
                # ...and it's even up-to-date...
                if self._local:
                    # ...and they'd like a local copy.
                    e = LocalCopy(self, r, None)
                    if isinstance(e, SCons.Errors.BuildError):
                        raise
                    self.store_info()
                if T: Trace(' 1\n')
                return 1
        # Not found up-to-date anywhere; record the changed() side
        # effects before reporting out-of-date.
        self.changed()
        if T: Trace(' None\n')
        return None
    else:
        r = self.changed()
        if T: Trace(' self.exists():  %s\n' % r)
        return not r
memoizer_counters.append(SCons.Memoize.CountValue('rfile'))

def rfile(self):
    """Return the corresponding Repository node if this file does not
    exist locally; otherwise return self.  The result is memoized."""
    try:
        return self._memo['rfile']
    except KeyError:
        pass
    result = self
    if not self.exists():
        # Search each repository directory (in order) for a
        # same-named entry, either in memory or on disk.
        norm_name = _my_normcase(self.name)
        for dir in self.dir.get_all_rdirs():
            try: node = dir.entries[norm_name]
            except KeyError: node = dir.file_on_disk(self.name)
            if node and node.exists() and \
               (isinstance(node, File) or isinstance(node, Entry) \
                or not node.is_derived()):
                result = node
                # Copy over our local attributes to the repository
                # Node so we identify shared object files in the
                # repository and don't assume they're static.
                #
                # This isn't perfect; the attribute would ideally
                # be attached to the object in the repository in
                # case it was built statically in the repository
                # and we changed it to shared locally, but that's
                # rarely the case and would only occur if you
                # intentionally used the same suffix for both
                # shared and static objects anyway.  So this
                # should work well in practice.
                result.attributes = self.attributes
                break
    self._memo['rfile'] = result
    return result

def rstr(self):
    """Return the string representation of this node's rfile()."""
    return str(self.rfile())
def get_cachedir_csig(self):
    """
    Fetch a Node's content signature for purposes of computing
    another Node's cachesig.

    This is a wrapper around the normal get_csig() method that handles
    the somewhat obscure case of using CacheDir with the -n option.
    Any files that don't exist would normally be "built" by fetching
    them from the cache, but the normal get_csig() method will try
    to open up the local file, which doesn't exist because the -n
    option meant we didn't actually pull the file from cachedir.
    But since the file *does* actually exist in the cachedir, we
    can use its contents for the csig.
    """
    try:
        return self.cachedir_csig
    except AttributeError:
        pass
    cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
    if not self.exists() and cachefile and os.path.exists(cachefile):
        # The file exists only in the cache: hash the cached copy.
        self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \
            SCons.Node.FS.File.md5_chunksize * 1024)
    else:
        self.cachedir_csig = self.get_csig()
    return self.cachedir_csig
def get_cachedir_bsig(self):
    """Compute (and cache) the signature used as this node's CacheDir
    key: child content signatures, the action signature, and the path.
    The path is included because multiple targets built by the same
    action would otherwise collide on an identical build signature."""
    try:
        return self.cachesig
    except AttributeError:
        pass
    children = self.children()
    executor = self.get_executor()
    sigs = [child.get_cachedir_csig() for child in children]
    sigs.append(SCons.Util.MD5signature(executor.get_contents()))
    sigs.append(self.path)
    self.cachesig = SCons.Util.MD5collect(sigs)
    return self.cachesig
# Process-wide default FS object, created lazily by get_default_fs().
default_fs = None

def get_default_fs():
    """Return the global default FS, creating it on first use."""
    global default_fs
    if not default_fs:
        default_fs = FS()
    return default_fs
class FileFinder(object):
    """
    Memoizing finder that locates a file Node by searching a list of
    directory nodes (as used for CPPPATH-style searches).
    """
    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    memoizer_counters = []

    def __init__(self):
        # Per-instance memoization dictionary, keyed by method name.
        self._memo = {}

    def filedir_lookup(self, p, fd=None):
        """
        A helper method for find_file() that looks up a directory for
        a file we're trying to find.  This only creates the Dir Node if
        it exists on-disk, since if the directory doesn't exist we know
        we won't find any files in it...  :-)

        It would be more compact to just use this as a nested function
        with a default keyword argument (see the commented-out version
        below), but that doesn't work unless you have nested scopes,
        so we define it here just so this work under Python 1.5.2.
        """
        if fd is None:
            fd = self.default_filedir
        dir, name = os.path.split(fd)
        drive, d = _my_splitdrive(dir)
        if not name and d[:1] in ('/', OS_SEP):
            #return p.fs.get_root(drive).dir_on_disk(name)
            return p.fs.get_root(drive)
        if dir:
            # Resolve the parent directory first (recursively).
            p = self.filedir_lookup(p, dir)
            if not p:
                return None
        norm_name = _my_normcase(name)
        try:
            node = p.entries[norm_name]
        except KeyError:
            # Not in the in-memory tree; create the Dir node only if
            # the directory actually exists on disk.
            return p.dir_on_disk(name)
        if isinstance(node, Dir):
            return node
        if isinstance(node, Entry):
            node.must_be_same(Dir)
            return node
        # The name exists but is not (and cannot become) a directory.
        return None

    def _find_file_key(self, filename, paths, verbose=None):
        # Memoization key; verbose only affects logging, not results.
        return (filename, paths)

    memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key))

    def find_file(self, filename, paths, verbose=None):
        """
        find_file(str, [Dir()]) -> [nodes]

        filename - a filename to find
        paths - a list of directory path *nodes* to search in.  Can be
                represented as a list, a tuple, or a callable that is
                called with no arguments and returns the list or tuple.

        returns - the node created from the found file.

        Find a node corresponding to either a derived file or a file
        that exists already.

        Only the first file found is returned, and none is returned
        if no file is found.
        """
        memo_key = self._find_file_key(filename, paths)
        try:
            memo_dict = self._memo['find_file']
        except KeyError:
            memo_dict = {}
            self._memo['find_file'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        # Normalize verbose into a callable logger if requested.
        if verbose and not callable(verbose):
            if not SCons.Util.is_String(verbose):
                verbose = "find_file"
            _verbose = u' %s: ' % verbose
            verbose = lambda s: sys.stdout.write(_verbose + s)
        filedir, filename = os.path.split(filename)
        if filedir:
            # More compact code that we can't use until we drop
            # support for Python 1.5.2:
            #
            #def filedir_lookup(p, fd=filedir):
            #    """
            #    A helper function that looks up a directory for a file
            #    we're trying to find.  This only creates the Dir Node
            #    if it exists on-disk, since if the directory doesn't
            #    exist we know we won't find any files in it...  :-)
            #    """
            #    dir, name = os.path.split(fd)
            #    if dir:
            #        p = filedir_lookup(p, dir)
            #        if not p:
            #            return None
            #    norm_name = _my_normcase(name)
            #    try:
            #        node = p.entries[norm_name]
            #    except KeyError:
            #        return p.dir_on_disk(name)
            #    if isinstance(node, Dir):
            #        return node
            #    if isinstance(node, Entry):
            #        node.must_be_same(Dir)
            #        return node
            #    if isinstance(node, Dir) or isinstance(node, Entry):
            #        return node
            #    return None
            #paths = [_f for _f in map(filedir_lookup, paths) if _f]
            self.default_filedir = filedir
            paths = [_f for _f in map(self.filedir_lookup, paths) if _f]
        result = None
        for dir in paths:
            if verbose:
                verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
            node, d = dir.srcdir_find_file(filename)
            if node:
                if verbose:
                    verbose("... FOUND '%s' in '%s'\n" % (filename, d))
                result = node
                break
        memo_dict[memo_key] = result
        return result

# Module-level convenience entry point bound to a shared finder.
find_file = FileFinder().find_file
def invalidate_node_memos(targets):
    """
    Invalidate the memoized values of all Nodes (files or directories)
    that are associated with the given entries. Has been added to
    clear the cache of nodes affected by a direct execution of an
    action (e.g.  Delete/Copy/Chmod). Existing Node caches become
    inconsistent if the action is run through Execute().  The argument
    `targets` can be a single Node object or filename, or a sequence
    of Nodes/filenames.
    """
    from traceback import extract_stack
    # Only flush when Execute() appears in the call stack: actions run
    # through Execute() in an SConscript are the only known offenders.
    # XXX Inspecting the stacktrace is a very dirty hack and should be
    # replaced by a more sensible solution.
    in_execute = False
    for frame in extract_stack():
        if frame[2] == 'Execute' and frame[0][-14:] == 'Environment.py':
            in_execute = True
            break
    if not in_execute:
        # Nothing to invalidate.
        return
    # Normalize a single Node/filename into a sequence.
    if not SCons.Util.is_List(targets):
        targets = [targets]
    for entry in targets:
        # Node objects can be cleared directly; a filename must be
        # resolved to a Node first.  XXX This creates Node objects even
        # for those filenames which do not correspond to an existing
        # Node object.
        try:
            entry.clear_memoized_values()
        except AttributeError:
            node = get_default_fs().Entry(entry)
            if node:
                node.clear_memoized_values()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Node
The Node package for the SCons software construction utility.
This is, in many ways, the heart of SCons.
A Node is where we encapsulate all of the dependency information about
any thing that SCons can build, or about any thing which SCons can use
to build some other thing. The canonical "thing," of course, is a file,
but a Node can also represent something remote (like a web page) or
something completely abstract (like an Alias).
Each specific type of "thing" is specifically represented by a subclass
of the Node base class: Node.FS.File for files, Node.Alias for aliases,
etc. Dependency information is kept here in the base class, and
information specific to files/aliases/etc. is in the subclass. The
goal, if we've done this correctly, is that any type of "thing" should
be able to depend on any other type of "thing."
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Node/__init__.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import collections
import copy
from itertools import chain
from SCons.Debug import logInstanceCreation
import SCons.Executor
import SCons.Memoize
import SCons.Util
from SCons.Debug import Trace
def classname(obj):
    """Return the unqualified class name of *obj* (text after the
    final dot of the class's string form)."""
    return str(obj.__class__).rsplit('.', 1)[-1]
# Node states
#
# These are in "priority" order, so that the maximum value for any
# child/dependency of a node represents the state of that node if
# it has no builder of its own.  The canonical example is a file
# system directory, which is only up to date if all of its children
# were up to date.
no_state = 0
pending = 1
executing = 2
up_to_date = 3
executed = 4
failed = 5

# Human-readable names for the state values above (diagnostics).
StateString = {
    0 : "no_state",
    1 : "pending",
    2 : "executing",
    3 : "up_to_date",
    4 : "executed",
    5 : "failed",
}

# controls whether implicit dependencies are cached:
implicit_cache = 0

# controls whether implicit dep changes are ignored:
implicit_deps_unchanged = 0

# controls whether the cached implicit deps are ignored:
implicit_deps_changed = 0

# A variable that can be set to an interface-specific function be called
# to annotate a Node with information about its creation.
def do_nothing(node): pass

Annotate = do_nothing
# Classes for signature info for Nodes.
class NodeInfoBase(object):
    """
    The generic base class for signature information for a Node.

    Node subclasses should subclass NodeInfoBase to provide their own
    logic for dealing with their own Node-specific signature information.
    """
    current_version_id = 1
    def __init__(self, node=None):
        # Store the class-level version id on the instance so that it
        # is included in the data pickled into the .sconsign file.
        self._version_id = self.current_version_id
    def update(self, node):
        """Refresh every field named in field_list by calling the
        corresponding get_<field>() method on *node*."""
        try:
            field_list = self.field_list
        except AttributeError:
            # Base class declares no fields: nothing to refresh.
            return
        for field in field_list:
            # Drop any stale value first.
            try:
                delattr(self, field)
            except AttributeError:
                pass
            # Re-fetch from the node, if it provides a getter.
            try:
                getter = getattr(node, 'get_' + field)
            except AttributeError:
                pass
            else:
                setattr(self, field, getter())
    def convert(self, node, val):
        """Hook for subclasses to convert stored values; no-op here."""
        pass
    def merge(self, other):
        """Absorb all of *other*'s attributes, overwriting our own."""
        self.__dict__.update(other.__dict__)
    def format(self, field_list=None, names=0):
        """Render the given fields (default: field_list, else all
        instance attributes sorted by name) as a list of strings,
        optionally prefixed with the field name."""
        if field_list is None:
            try:
                field_list = self.field_list
            except AttributeError:
                field_list = sorted(self.__dict__.keys())
        def _render(field):
            # A missing attribute renders as the string 'None'.
            text = str(getattr(self, field, None))
            if names:
                text = field + ': ' + text
            return text
        return [_render(field) for field in field_list]
class BuildInfoBase(object):
    """
    The generic base class for build information for a Node.

    This is what gets stored in a .sconsign file for each target file.
    It contains a NodeInfo instance for this node (signature information
    that's specific to the type of Node) and direct attributes for the
    generic build stuff we have to track:  sources, explicit dependencies,
    implicit dependencies, and action information.
    """
    current_version_id = 1
    def __init__(self, node=None):
        # Copy the version id onto the instance so it is pickled into
        # the .sconsign file along with everything else.
        self._version_id = self.current_version_id
        # Signature lists for sources and explicit/implicit
        # dependencies, plus the signature of the build action itself.
        self.bsourcesigs = []
        self.bdependsigs = []
        self.bimplicitsigs = []
        self.bactsig = None
    def merge(self, other):
        """Absorb all of *other*'s attributes, overwriting our own."""
        self.__dict__.update(other.__dict__)
class Node(object):
    """The base Node class, for entities that we know how to
    build, or use to build other Nodes.
    """

    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    memoizer_counters = []

    class Attrs(object):
        # Bare attribute container; see self.attributes in __init__().
        pass
def __init__(self):
    """Initialize the dependency lists, state, and bookkeeping
    attributes of a bare Node."""
    if __debug__: logInstanceCreation(self, 'Node.Node')
    # Note that we no longer explicitly initialize a self.builder
    # attribute to None here.  That's because the self.builder
    # attribute may be created on-the-fly later by a subclass (the
    # canonical example being a builder to fetch a file from a
    # source code system like CVS or Subversion).

    # Each list of children that we maintain is accompanied by a
    # dictionary used to look up quickly whether a node is already
    # present in the list.  Empirical tests showed that it was
    # fastest to maintain them as side-by-side Node attributes in
    # this way, instead of wrapping up each list+dictionary pair in
    # a class.  (Of course, we could always still do that in the
    # future if we had a good reason to...).
    self.sources = []       # source files used to build node
    self.sources_set = set()
    self._specific_sources = False
    self.depends = []       # explicit dependencies (from Depends)
    self.depends_set = set()
    self.ignore = []        # dependencies to ignore
    self.ignore_set = set()
    self.prerequisites = SCons.Util.UniqueList()
    self.implicit = None    # implicit (scanned) dependencies (None means not scanned yet)
    self.waiting_parents = set()
    self.waiting_s_e = set()
    self.ref_count = 0
    self.wkids = None       # Kids yet to walk, when it's an array

    self.env = None
    self.state = no_state
    self.precious = None
    self.noclean = 0
    self.nocache = 0
    self.cached = 0 # is this node pulled from cache?
    self.always_build = None
    self.includes = None
    self.attributes = self.Attrs() # Generic place to stick information about the Node.
    self.side_effect = 0 # true iff this node is a side effect
    self.side_effects = [] # the side effects of building this target
    self.linked = 0 # is this node linked to the variant directory?

    self.clear_memoized_values()

    # Let the interface in which the build engine is embedded
    # annotate this Node with its own info (like a description of
    # what line in what file created the node, for example).
    Annotate(self)
def disambiguate(self, must_exist=None):
    # Base nodes need no Entry->File/Dir disambiguation.
    return self

def get_suffix(self):
    # Base nodes have no filename suffix.
    return ''
memoizer_counters.append(SCons.Memoize.CountValue('get_build_env'))

def get_build_env(self):
    """Fetch (and memoize) the Environment used to build this node."""
    memo = self._memo
    try:
        return memo['get_build_env']
    except KeyError:
        env = self.get_executor().get_build_env()
        memo['get_build_env'] = env
        return env
def get_build_scanner_path(self, scanner):
    """Fetch the appropriate scanner path for this node."""
    # The executor knows the environment/targets needed for the path.
    return self.get_executor().get_build_scanner_path(scanner)

def set_executor(self, executor):
    """Set the action executor for this node."""
    self.executor = executor
def get_executor(self, create=1):
    """Fetch the action executor for this node.  Create one if
    there isn't already one, and requested to do so."""
    try:
        executor = self.executor
    except AttributeError:
        if not create:
            raise
        try:
            act = self.builder.action
        except AttributeError:
            # No builder (or no action): use a do-nothing executor.
            executor = SCons.Executor.Null(targets=[self])
        else:
            executor = SCons.Executor.Executor(act,
                                               self.env or self.builder.env,
                                               [self.builder.overrides],
                                               [self],
                                               self.sources)
        # Cache for subsequent calls.
        self.executor = executor
    return executor
def executor_cleanup(self):
    """Let the executor clean up any cached information."""
    try:
        executor = self.get_executor(create=None)
    except AttributeError:
        # No executor was ever created; nothing to clean up.
        pass
    else:
        executor.cleanup()

def reset_executor(self):
    "Remove cached executor; forces recompute when needed."
    try:
        delattr(self, 'executor')
    except AttributeError:
        pass
def push_to_cache(self):
    """Try to push a node into a cache
    """
    # Base nodes have nothing to cache; subclasses override.
    pass

def retrieve_from_cache(self):
    """Try to retrieve the node's content from a cache

    This method is called from multiple threads in a parallel build,
    so only do thread safe stuff here. Do thread unsafe stuff in
    built().

    Returns true if the node was successfully retrieved.
    """
    # Base nodes are never retrievable from a cache.
    return 0

#
# Taskmaster interface subsystem
#

def make_ready(self):
    """Get a Node ready for evaluation.

    This is called before the Taskmaster decides if the Node is
    up-to-date or not.  Overriding this method allows for a Node
    subclass to be disambiguated if necessary, or for an implicit
    source builder to be attached.
    """
    pass
def prepare(self):
    """Prepare for this Node to be built.

    This is called after the Taskmaster has decided that the Node
    is out-of-date and must be rebuilt, but before actually calling
    the method to build the Node.

    This default implementation checks that explicit or implicit
    dependencies either exist or are derived, and initializes the
    BuildInfo structure that will hold the information about how
    this node is, uh, built.

    (The existence of source files is checked separately by the
    Executor, which aggregates checks for all of the targets built
    by a specific action.)

    Overriding this method allows for for a Node subclass to remove
    the underlying file from the file system.  Note that subclass
    methods should call this base class method to get the child
    check and the BuildInfo structure.
    """
    # A dependency that is neither present nor buildable is fatal.
    for dep in self.depends:
        if dep.missing():
            msg = "Explicit dependency `%s' not found, needed by target `%s'."
            raise SCons.Errors.StopError(msg % (dep, self))
    if self.implicit is not None:
        for dep in self.implicit:
            if dep.missing():
                msg = "Implicit dependency `%s' not found, needed by target `%s'."
                raise SCons.Errors.StopError(msg % (dep, self))
    self.binfo = self.get_binfo()
def build(self, **kw):
    """Actually build the node.

    This is called by the Taskmaster after it's decided that the
    Node is out-of-date and must be rebuilt, and after the prepare()
    method has gotten everything, uh, prepared.

    This method is called from multiple threads in a parallel build,
    so only do thread safe stuff here. Do thread unsafe stuff
    in built().
    """
    try:
        self.get_executor()(self, **kw)
    except SCons.Errors.BuildError, e:
        # Tag the error with the failing node before re-raising.
        e.node = self
        raise
def built(self):
    """Called just after this node is successfully built."""

    # Clear the implicit dependency caches of any Nodes
    # waiting for this Node to be built.
    for parent in self.waiting_parents:
        parent.implicit = None

    self.clear()

    # Refresh the node's signature info now that it has been rebuilt.
    self.ninfo.update(self)

def visited(self):
    """Called just after this node has been visited (with or
    without a build)."""
    try:
        binfo = self.binfo
    except AttributeError:
        # Apparently this node doesn't need build info, so
        # don't bother calculating or storing it.
        pass
    else:
        self.ninfo.update(self)
        self.store_info()

#
#
#

def add_to_waiting_s_e(self, node):
    # Record a node whose side effects must wait on this one.
    self.waiting_s_e.add(node)
def add_to_waiting_parents(self, node):
    """
    Returns the number of nodes added to our waiting parents list:
    1 if we add a unique waiting parent, 0 if not.  (Note that the
    returned values are intended to be used to increment a reference
    count, so don't think you can "clean up" this function by using
    True and False instead...)
    """
    if node in self.waiting_parents:
        return 0
    self.waiting_parents.add(node)
    return 1
def postprocess(self):
    """Clean up anything we don't need to hang onto after we've
    been built."""
    self.executor_cleanup()
    # Drop parent references so they can be garbage collected.
    self.waiting_parents = set()
def clear(self):
    """Completely clear a Node of all its cached state (so that it
    can be re-evaluated by interfaces that do continuous integration
    builds).
    """
    # Dropping the build info isn't needed for normal execution, but
    # interactive mode may rebuild the same target and must start
    # from scratch.
    self.del_binfo()
    self.clear_memoized_values()
    self.ninfo = self.new_ninfo()
    self.executor_cleanup()
    try:
        del self._calculated_sig
    except AttributeError:
        pass
    self.includes = None
def clear_memoized_values(self):
    # Discard every memoized result for this node.
    self._memo = {}

def builder_set(self, builder):
    # Attach a builder; any previously created executor is now stale.
    self.builder = builder
    try:
        del self.executor
    except AttributeError:
        pass
def has_builder(self):
    """Return whether this Node has a builder or not.

    In Boolean tests, this turns out to be a *lot* more efficient
    than simply examining the builder attribute directly ("if
    node.builder: ...").  When the builder attribute is examined
    directly, it ends up calling __getattr__ for both the __len__
    and __nonzero__ attributes on instances of our Builder Proxy
    class(es), generating a bazillion extra calls and slowing
    things down immensely.
    """
    try:
        b = self.builder
    except AttributeError:
        # There was no explicit builder for this Node, so initialize
        # the self.builder attribute to None now.
        b = self.builder = None
    return b is not None

def set_explicit(self, is_explicit):
    # Record whether the builder was attached explicitly by the user.
    self.is_explicit = is_explicit

def has_explicit_builder(self):
    """Return whether this Node has an explicit builder

    This allows an internal Builder created by SCons to be marked
    non-explicit, so that it can be overridden by an explicit
    builder that the user supplies (the canonical example being
    directories)."""
    try:
        return self.is_explicit
    except AttributeError:
        # Never marked either way: cache the negative answer.
        self.is_explicit = None
        return self.is_explicit
def get_builder(self, default_builder=None):
    """Return the set builder, or a specified default value"""
    # getattr's default covers the no-builder-attribute case exactly
    # like the original try/except AttributeError did.
    return getattr(self, 'builder', default_builder)
# Side-effect nodes use the same "has a builder?" check as ordinary nodes.
multiple_side_effect_has_builder = has_builder
def is_derived(self):
    """
    Returns true iff this node is derived (i.e. built).

    This should return true only for nodes whose path should be in
    the variant directory when duplicate=0 and should contribute their build
    signatures when they are used as source files to other derived files. For
    example: source with source builders are not derived in this sense,
    and hence should not return true.
    """
    return self.has_builder() or self.side_effect

def alter_targets(self):
    """Return a list of alternate targets for this Node.
    """
    # Base nodes have no alternates and no associated message.
    return [], None

def get_found_includes(self, env, scanner, path):
    """Return the scanned include lines (implicit dependencies)
    found in this node.

    The default is no implicit dependencies.  We expect this method
    to be overridden by any subclass that can be scanned for
    implicit dependencies.
    """
    return []
def get_implicit_deps(self, env, scanner, path):
    """Return a list of implicit dependencies for this node.

    This method exists to handle recursive invocation of the scanner
    on the implicit dependencies returned by the scanner, if the
    scanner's recursive flag says that we should.
    """
    if not scanner:
        return []

    # Give the scanner a chance to select a more specific scanner
    # for this Node.
    #scanner = scanner.select(self)

    # Breadth-first walk: scan each node, queue whatever new
    # dependencies the scanner says to recurse into.
    queue = [self]
    seen = {self: 1}
    deps = []
    while queue:
        current = queue.pop(0)
        fresh = [dep for dep in current.get_found_includes(env, scanner, path)
                 if dep not in seen]
        if fresh:
            deps.extend(fresh)
            for dep in fresh:
                seen[dep] = 1
            queue.extend(scanner.recurse_nodes(fresh))
    return deps
def get_env_scanner(self, env, kw=None):
    """Return the scanner registered in *env* for this node's
    scanner key (usually the file suffix).

    The *kw* argument is accepted for interface compatibility but is
    unused; its default is None rather than a shared mutable dict
    (the original ``kw={}`` is the classic mutable-default pitfall).
    """
    return env.get_scanner(self.scanner_key())
def get_target_scanner(self):
    # The target scanner is configured on the builder.
    return self.builder.target_scanner
def get_source_scanner(self, node):
    """Fetch the source scanner for the specified node

    NOTE:  "self" is the target being built, "node" is
    the source file for which we want to fetch the scanner.

    Implies self.has_builder() is true; again, expect to only be
    called from locations where this is already verified.

    This function may be called very often; it attempts to cache
    the scanner found to improve performance.
    """
    scanner = None
    # The try/except also covers the case of a missing builder.
    try:
        scanner = self.builder.source_scanner
    except AttributeError:
        pass
    if not scanner:
        # The builder didn't have an explicit scanner, so go look up
        # a scanner from env['SCANNERS'] based on the node's scanner
        # key (usually the file extension).
        scanner = self.get_env_scanner(self.get_build_env())
    if not scanner:
        return scanner
    return scanner.select(node)
def add_to_implicit(self, deps):
    """Append *deps* to the implicit-dependency list, initializing
    the list (and its membership set) lazily on first use."""
    # getattr's default covers both "attribute missing" and "is None".
    if getattr(self, 'implicit', None) is None:
        self.implicit = []
        self.implicit_set = set()
        self._children_reset()
    self._add_child(self.implicit, self.implicit_set, deps)
def scan(self):
    """Scan this node's dependents for implicit dependencies."""
    # Don't bother scanning non-derived files, because we don't
    # care what their dependencies are.
    # Don't scan again, if we already have scanned.
    if self.implicit is not None:
        return
    self.implicit = []
    self.implicit_set = set()
    self._children_reset()
    if not self.has_builder():
        return

    build_env = self.get_build_env()
    executor = self.get_executor()

    # Here's where we implement --implicit-cache.
    if implicit_cache and not implicit_deps_changed:
        implicit = self.get_stored_implicit()
        if implicit is not None:
            # We now add the implicit dependencies returned from the
            # stored .sconsign entry to have already been converted
            # to Nodes for us.  (We used to run them through a
            # source_factory function here.)

            # Update all of the targets with them.  This
            # essentially short-circuits an N*M scan of the
            # sources for each individual target, which is a hell
            # of a lot more efficient.
            for tgt in executor.get_all_targets():
                tgt.add_to_implicit(implicit)

            if implicit_deps_unchanged or self.is_up_to_date():
                return
            # one of this node's sources has changed,
            # so we must recalculate the implicit deps for all targets
            for tgt in executor.get_all_targets():
                tgt.implicit = []
                tgt.implicit_set = set()

    # Have the executor scan the sources.
    executor.scan_sources(self.builder.source_scanner)

    # If there's a target scanner, have the executor scan the target
    # node itself and associated targets that might be built.
    scanner = self.get_target_scanner()
    if scanner:
        executor.scan_targets(scanner)
def scanner_key(self):
    # Base nodes have no scanner key (File nodes use the suffix).
    return None

def select_scanner(self, scanner):
    """Selects a scanner for this Node.

    This is a separate method so it can be overridden by Node
    subclasses (specifically, Node.FS.Dir) that *must* use their
    own Scanner and don't select one the Scanner.Selector that's
    configured for the target.
    """
    return scanner.select(self)

def env_set(self, env, safe=0):
    # Attach a construction environment; with safe true, never
    # overwrite an environment that is already set.
    if safe and self.env:
        return
    self.env = env

#
# SIGNATURE SUBSYSTEM
#

# Subclasses override these with their own NodeInfo/BuildInfo types.
NodeInfo = NodeInfoBase
BuildInfo = BuildInfoBase

def new_ninfo(self):
    # Create a fresh (empty) NodeInfo of the class-appropriate type.
    ninfo = self.NodeInfo(self)
    return ninfo
def get_ninfo(self):
    # Return this node's NodeInfo, creating and caching it on first use.
    try:
        return self.ninfo
    except AttributeError:
        self.ninfo = self.new_ninfo()
        return self.ninfo

def new_binfo(self):
    # Create a fresh (empty) BuildInfo of the class-appropriate type.
    binfo = self.BuildInfo(self)
    return binfo
def get_binfo(self):
    """
    Fetch a node's build information.

    node - the node whose sources will be collected
    cache - alternate node to use for the signature cache
    returns - the build signature

    This no longer handles the recursive descent of the
    node's children's signatures.  We expect that they're
    already built and updated by someone else, if that's
    what's wanted.
    """
    try:
        return self.binfo
    except AttributeError:
        pass

    binfo = self.new_binfo()
    self.binfo = binfo

    executor = self.get_executor()
    ignore_set = self.ignore_set

    if self.has_builder():
        binfo.bact = str(executor)
        binfo.bactsig = SCons.Util.MD5signature(executor.get_contents())

    if self._specific_sources:
        # Only the explicitly attached sources, minus ignores.
        sources = []
        for s in self.sources:
            if s not in ignore_set:
                sources.append(s)
    else:
        sources = executor.get_unignored_sources(self, self.ignore)

    # De-duplicate while preserving source order.
    seen = set()
    bsources = []
    bsourcesigs = []
    for s in sources:
        if not s in seen:
            seen.add(s)
            bsources.append(s)
            bsourcesigs.append(s.get_ninfo())
    binfo.bsources = bsources
    binfo.bsourcesigs = bsourcesigs

    depends = self.depends
    dependsigs = []
    for d in depends:
        if d not in ignore_set:
            dependsigs.append(d.get_ninfo())
    binfo.bdepends = depends
    binfo.bdependsigs = dependsigs

    implicit = self.implicit or []
    implicitsigs = []
    for i in implicit:
        if i not in ignore_set:
            implicitsigs.append(i.get_ninfo())
    binfo.bimplicit = implicit
    binfo.bimplicitsigs = implicitsigs

    return binfo
def del_binfo(self):
    """Delete the build info from this node."""
    try:
        delattr(self, 'binfo')
    except AttributeError:
        # No build info was ever attached; nothing to delete.
        pass
def get_csig(self):
    """Return the node's content signature, computing and caching it
    on the NodeInfo the first time it is requested."""
    try:
        return self.ninfo.csig
    except AttributeError:
        pass
    ninfo = self.get_ninfo()
    ninfo.csig = SCons.Util.MD5signature(self.get_contents())
    return ninfo.csig
def get_cachedir_csig(self):
    # Base nodes need no CacheDir special case; use the normal csig.
    return self.get_csig()

def store_info(self):
    """Make the build signature permanent (that is, store it in the
    .sconsign file or equivalent)."""
    # Base nodes have no persistent store; File overrides this.
    pass

def do_not_store_info(self):
    # Deliberate no-op used to disable signature storage on a node.
    pass

def get_stored_info(self):
    # Base nodes have no stored (.sconsign) information.
    return None

def get_stored_implicit(self):
    """Fetch the stored implicit dependencies"""
    return None

#
#
#
def set_precious(self, precious = 1):
    """Set the Node's precious value."""
    self.precious = precious

def set_noclean(self, noclean = 1):
    """Set the Node's noclean value."""
    # Make sure noclean is an integer so the --debug=stree
    # output in Util.py can use it as an index.
    self.noclean = noclean and 1 or 0

def set_nocache(self, nocache = 1):
    """Set the Node's nocache value."""
    # Make sure nocache is an integer so the --debug=stree
    # output in Util.py can use it as an index.
    self.nocache = nocache and 1 or 0

def set_always_build(self, always_build = 1):
    """Set the Node's always_build value."""
    self.always_build = always_build

def exists(self):
    """Does this node exist?"""
    # All nodes exist by default:
    return 1

def rexists(self):
    """Does this node exist locally or in a repository?"""
    # There are no repositories by default:
    return self.exists()

def missing(self):
    # A node is missing when it is not built, not linked into the
    # variant directory, and not present locally or in a repository.
    return not self.is_derived() and \
           not self.linked and \
           not self.rexists()
def add_dependency(self, depend):
    """Adds dependencies."""
    try:
        self._add_child(self.depends, self.depends_set, depend)
    except TypeError, e:
        # _add_child() raised with the offending child (or list) as
        # the exception argument; unwrap it for the error message.
        e = e.args[0]
        if SCons.Util.is_List(e):
            s = list(map(str, e))
        else:
            s = str(e)
        raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))

def add_prerequisite(self, prerequisite):
    """Adds prerequisites"""
    self.prerequisites.extend(prerequisite)
    self._children_reset()

def add_ignore(self, depend):
    """Adds dependencies to ignore."""
    try:
        self._add_child(self.ignore, self.ignore_set, depend)
    except TypeError, e:
        # See add_dependency() for the unwrapping convention.
        e = e.args[0]
        if SCons.Util.is_List(e):
            s = list(map(str, e))
        else:
            s = str(e)
        raise SCons.Errors.UserError("attempted to ignore a non-Node dependency of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))

def add_source(self, source):
    """Adds sources."""
    if self._specific_sources:
        # The source list has been pinned; ignore further additions.
        return
    try:
        self._add_child(self.sources, self.sources_set, source)
    except TypeError, e:
        e = e.args[0]
        if SCons.Util.is_List(e):
            s = list(map(str, e))
        else:
            s = str(e)
        raise SCons.Errors.UserError("attempted to add a non-Node as source of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
#if type(child) is not type([]):
# child = [child]
#for c in child:
# if not isinstance(c, Node):
# raise TypeError, c
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset()
def set_specific_source(self, source):
self.add_source(source)
self._specific_sources = True
def add_wkid(self, wkid):
"""Add a node to the list of kids waiting to be evaluated"""
if self.wkids is not None:
self.wkids.append(wkid)
def _children_reset(self):
"""Invalidate everything derived from this node's child list."""
self.clear_memoized_values()
# We need to let the Executor clear out any calculated
# build info that it's cached so we can re-calculate it.
self.executor_cleanup()
# Class-level statement (not part of _children_reset): registers a
# call counter for _children_get with the Memoizer debug machinery.
memoizer_counters.append(SCons.Memoize.CountValue('_children_get'))
def _children_get(self):
"""Return this node's children (sources + depends + implicit),
filtered through ignore_set; the result is memoized per node."""
try:
return self._memo['children_get']
except KeyError:
pass
# The return list may contain duplicate Nodes, especially in
# source trees where there are a lot of repeated #includes
# of a tangle of .h files. Profiling shows, however, that
# eliminating the duplicates with a brute-force approach that
# preserves the order (that is, something like:
#
# u = []
# for n in list:
# if n not in u:
# u.append(n)"
#
# takes more cycles than just letting the underlying methods
# hand back cached values if a Node's information is requested
# multiple times. (Other methods of removing duplicates, like
# using dictionary keys, lose the order, and the only ordered
# dictionary patterns I found all ended up using "not in"
# internally anyway...)
if self.ignore_set:
if self.implicit is None:
iter = chain(self.sources,self.depends)
else:
iter = chain(self.sources, self.depends, self.implicit)
children = []
for i in iter:
if i not in self.ignore_set:
children.append(i)
else:
# Nothing to ignore: plain list concatenation is cheapest.
if self.implicit is None:
children = self.sources + self.depends
else:
children = self.sources + self.depends + self.implicit
self._memo['children_get'] = children
return children
def all_children(self, scan=1):
    """Return a list of all the node's direct children (sources,
    explicit depends and scanned implicit depends), with no
    Ignore() filtering applied."""
    if scan:
        self.scan()
    # Duplicates are deliberately NOT removed here: profiling showed
    # that de-duplicating costs more than letting downstream callers
    # hit each Node's cached values (see _children_get for details).
    kids = self.sources + self.depends
    if self.implicit is None:
        return kids
    return kids + self.implicit

def children(self, scan=1):
    """Return the node's direct children, minus those that are
    ignored by this node."""
    if scan:
        self.scan()
    return self._children_get()
def set_state(self, state):
    """Record this node's current walk/build state."""
    self.state = state

def get_state(self):
    """Return this node's current walk/build state."""
    return self.state

def state_has_changed(self, target, prev_ni):
    """True unless this node finished its last walk up to date."""
    return self.state != SCons.Node.up_to_date

def get_env(self):
    """Return the construction environment for this node, falling
    back to the global default environment when none was set."""
    env = self.env
    if env:
        return env
    import SCons.Defaults
    return SCons.Defaults.DefaultEnvironment()
def changed_since_last_build(self, target, prev_ni):
"""
Must be overridden in a specific subclass to return True if this
Node (a dependency) has changed since the last time it was used
to build the specified target. prev_ni is this Node's state (for
example, its file timestamp, length, maybe content signature)
as of the last time the target was built.
Note that this method is called through the dependency, not the
target, because a dependency Node must be able to use its own
logic to decide if it changed. For example, File Nodes need to
obey if we're configured to use timestamps, but Python Value Nodes
never use timestamps and always use the content. If this method
were called through the target, then each Node's implementation
of this method would have to have more complicated logic to
handle all the different Node types on which it might depend.
"""
raise NotImplementedError
def Decider(self, function):
# Install 'function' as this node's personal
# changed_since_last_build() implementation (per-node Decider()).
SCons.Util.AddMethod(self, function, 'changed_since_last_build')
def changed(self, node=None):
"""
Returns if the node is up-to-date with respect to the BuildInfo
stored last time it was built. The default behavior is to compare
it against our own previously stored BuildInfo, but the stored
BuildInfo from another Node (typically one in a Repository)
can be used instead.
Note that we now *always* check every dependency. We used to
short-circuit the check by returning as soon as we detected
any difference, but we now rely on checking every dependency
to make sure that any necessary Node information (for example,
the content signature of an #included .h file) is updated.
"""
# Flip t to 1 to enable the Trace() debugging statements below.
t = 0
if t: Trace('changed(%s [%s], %s)' % (self, classname(self), node))
if node is None:
node = self
result = False
bi = node.get_stored_info().binfo
then = bi.bsourcesigs + bi.bdependsigs + bi.bimplicitsigs
children = self.children()
diff = len(children) - len(then)
if diff:
# The old and new dependency lists are different lengths.
# This always indicates that the Node must be rebuilt.
# We also extend the old dependency list with enough None
# entries to equal the new dependency list, for the benefit
# of the loop below that updates node information.
then.extend([None] * diff)
if t: Trace(': old %s new %s' % (len(then), len(children)))
result = True
# Note: no early break -- every child is visited so each one
# refreshes its cached info (see docstring above).
for child, prev_ni in zip(children, then):
if child.changed_since_last_build(self, prev_ni):
if t: Trace(': %s changed' % child)
result = True
# A changed build action (captured in bactsig) also forces a rebuild.
contents = self.get_executor().get_contents()
if self.has_builder():
import SCons.Util
newsig = SCons.Util.MD5signature(contents)
if bi.bactsig != newsig:
if t: Trace(': bactsig %s != newsig %s' % (bi.bactsig, newsig))
result = True
if not result:
if t: Trace(': up to date')
if t: Trace('\n')
return result
def is_up_to_date(self):
"""Default check for whether the Node is current: unknown Node
subtypes are always out of date, so they will always get built."""
return None
def children_are_up_to_date(self):
"""Alternate check for whether the Node is current: If all of
our children were up-to-date, then this Node was up-to-date, too.
The SCons.Node.Alias and SCons.Node.Python.Value subclasses
rebind their current() method to this method."""
# Allow the children to calculate their signatures.
self.binfo = self.get_binfo()
if self.always_build:
return None
# Track the 'worst' (highest-valued) state seen among the children.
state = 0
for kid in self.children(None):
s = kid.get_state()
if s and (not state or s > state):
state = s
return (state == 0 or state == SCons.Node.up_to_date)
def is_literal(self):
"""Always pass the string representation of a Node to
the command interpreter literally."""
return 1
def render_include_tree(self):
"""
Return a text representation, suitable for displaying to the
user, of the include tree for the sources of this node.
"""
if self.is_derived() and self.env:
env = self.get_build_env()
for s in self.sources:
scanner = self.get_source_scanner(s)
if scanner:
path = self.get_build_scanner_path(scanner)
else:
path = None
# Default arguments bind the current env/scanner/path so the
# callback is self-contained when render_tree invokes it later.
def f(node, env=env, scanner=scanner, path=path):
return node.get_found_includes(env, scanner, path)
# NOTE(review): this return sits inside the loop, so only the
# FIRST source's include tree is rendered -- verify intended.
return SCons.Util.render_tree(s, f, 1)
else:
return None
def get_abspath(self):
    """Return an absolute path to the Node.  The generic Node has no
    notion of a filesystem location, so this is simply str(self);
    Node types with a concept of relative paths may return
    something different."""
    return str(self)

def for_signature(self):
    """Return a stable string form of this Node for signature
    calculation.  Unlike __str__() (which might return, say, a
    relative file path), this value must never change for a given
    Node, so that command-line signatures stay stable and
    unnecessary rebuilds are avoided.  It does not need to be
    usable on an actual command line."""
    return str(self)

def get_string(self, for_signature):
    """Convenience for command generators (CommandGeneratorActions
    or callable Environment variables): convert this Node to a
    string, honoring the for_signature flag so that signature
    computation uses for_signature() and real command-line
    construction uses plain str()."""
    return self.for_signature() if for_signature else str(self)

def get_subst_proxy(self):
    """Return the object used for Environment variable substitution.
    Subclasses may return a proxy that adds extras (e.g.
    __getattr__) without slowing down Node itself; the base class
    simply returns self."""
    return self
def explain(self):
"""Return a human-readable explanation of why this node is being
rebuilt, or None when no stored build info is available."""
if not self.exists():
return "building `%s' because it doesn't exist\n" % self
if self.always_build:
return "rebuilding `%s' because AlwaysBuild() is specified\n" % self
old = self.get_stored_info()
if old is None:
return None
old = old.binfo
old.prepare_dependencies()
try:
old_bkids = old.bsources + old.bdepends + old.bimplicit
old_bkidsigs = old.bsourcesigs + old.bdependsigs + old.bimplicitsigs
except AttributeError:
return "Cannot explain why `%s' is being rebuilt: No previous build information found\n" % self
new = self.get_binfo()
new_bkids = new.bsources + new.bdepends + new.bimplicit
new_bkidsigs = new.bsourcesigs + new.bdependsigs + new.bimplicitsigs
# Map each old/new child to its stored signature for comparison.
osig = dict(zip(old_bkids, old_bkidsigs))
nsig = dict(zip(new_bkids, new_bkidsigs))
# The sources and dependencies we'll want to report are all stored
# as relative paths to this target's directory, but we want to
# report them relative to the top-level SConstruct directory,
# so we only print them after running them through this lambda
# to turn them into the right relative Node and then return
# its string.
def stringify( s, E=self.dir.Entry ) :
if hasattr( s, 'dir' ) :
return str(E(s))
return str(s)
lines = []
removed = [x for x in old_bkids if not x in new_bkids]
if removed:
removed = list(map(stringify, removed))
fmt = "`%s' is no longer a dependency\n"
lines.extend([fmt % s for s in removed])
for k in new_bkids:
if not k in old_bkids:
lines.append("`%s' is a new dependency\n" % stringify(k))
elif k.changed_since_last_build(self, osig[k]):
lines.append("`%s' changed\n" % stringify(k))
if len(lines) == 0 and old_bkids != new_bkids:
lines.append("the dependency order changed:\n" +
"%sold: %s\n" % (' '*15, list(map(stringify, old_bkids))) +
"%snew: %s\n" % (' '*15, list(map(stringify, new_bkids))))
if len(lines) == 0:
# No dependency difference found: fall back to comparing the
# build action itself.
def fmt_with_title(title, strlines):
lines = strlines.split('\n')
sep = '\n' + ' '*(15 + len(title))
return ' '*15 + title + sep.join(lines) + '\n'
if old.bactsig != new.bactsig:
if old.bact == new.bact:
lines.append("the contents of the build action changed\n" +
fmt_with_title('action: ', new.bact))
else:
lines.append("the build action changed:\n" +
fmt_with_title('old: ', old.bact) +
fmt_with_title('new: ', new.bact))
if len(lines) == 0:
return "rebuilding `%s' for unknown reasons\n" % self
preamble = "rebuilding `%s' because" % self
if len(lines) == 1:
return "%s %s" % (preamble, lines[0])
else:
lines = ["%s:\n" % preamble] + lines
return ( ' '*11).join(lines)
class NodeList(collections.UserList):
    """A list of Nodes whose str() is the list of each Node's str()."""
    def __str__(self):
        return str([str(item) for item in self.data])

def get_children(node, parent):
    """Default Walker kids function: return a node's children."""
    return node.children()

def ignore_cycle(node, stack):
    """Default Walker cycle handler: silently ignore cycles."""
    pass

def do_nothing(node, parent):
    """Default Walker eval function: no-op."""
    pass
class Walker(object):
"""An iterator for walking a Node tree.
This is depth-first, children are visited before the parent.
The Walker object can be initialized with any node, and
returns the next node on the descent with each get_next() call.
'kids_func' is an optional function that will be called to
get the children of a node instead of calling 'children'.
'cycle_func' is an optional function that will be called
when a cycle is detected.
This class does not get caught in node cycles caused, for example,
by C header file include loops.
"""
def __init__(self, node, kids_func=get_children,
cycle_func=ignore_cycle,
eval_func=do_nothing):
self.kids_func = kids_func
self.cycle_func = cycle_func
self.eval_func = eval_func
# Seed the walk: wkids holds each node's not-yet-visited children.
node.wkids = copy.copy(kids_func(node, None))
self.stack = [node]
self.history = {} # used to efficiently detect and avoid cycles
self.history[node] = None
def get_next(self):
"""Return the next node for this walk of the tree.
This function is intentionally iterative, not recursive,
to sidestep any issues of stack size limitations.
"""
while self.stack:
if self.stack[-1].wkids:
# The top of the stack still has unvisited kids: descend.
node = self.stack[-1].wkids.pop(0)
if not self.stack[-1].wkids:
self.stack[-1].wkids = None
if node in self.history:
self.cycle_func(node, self.stack)
else:
node.wkids = copy.copy(self.kids_func(node, self.stack[-1]))
self.stack.append(node)
self.history[node] = None
else:
# Top of stack is exhausted: visit it on the way back up.
node = self.stack.pop()
del self.history[node]
if node:
if self.stack:
parent = self.stack[-1]
else:
parent = None
self.eval_func(node, parent)
return node
return None
def is_done(self):
"""True when the walk has yielded every reachable node."""
return not self.stack
# Hooks for translating command-line arguments into Nodes: other modules
# (e.g. SCons.Node.Alias below) append lookup functions to this list.
arg2nodes_lookups = []
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""scons.Node.Python
Python nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Python.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Node
class ValueNodeInfo(SCons.Node.NodeInfoBase):
"""Node info for Value nodes; tracks only the content signature."""
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
# Reconstitute a Value node from its stored string form.
return Value(s)
class ValueBuildInfo(SCons.Node.BuildInfoBase):
"""Build info record for Value nodes (no extra fields)."""
current_version_id = 1
class Value(SCons.Node.Node):
"""A class for Python variables, typically passed on the command line
or generated by a script, but not from a file or some other source.
"""
NodeInfo = ValueNodeInfo
BuildInfo = ValueBuildInfo
def __init__(self, value, built_value=None):
SCons.Node.Node.__init__(self)
self.value = value
# built_value caches the evaluated value; leaving the attribute
# unset marks the node as not yet built.
if built_value is not None:
self.built_value = built_value
def str_for_display(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def make_ready(self):
# Compute the csig up front so dependents can use it.
self.get_csig()
def build(self, **kw):
# Only build once: an existing built_value short-circuits.
if not hasattr(self, 'built_value'):
SCons.Node.Node.build(self, **kw)
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Value nodes get built regardless of
# what directory scons was run from. Value nodes
# are outside the filesystem:
return 1
def write(self, built_value):
"""Set the value of the node."""
self.built_value = built_value
def read(self):
"""Return the value. If necessary, the value is built."""
self.build()
if not hasattr(self, 'built_value'):
# Nothing produced a built value: fall back to the raw value.
self.built_value = self.value
return self.built_value
def get_text_contents(self):
"""By the assumption that the node.built_value is a
deterministic product of the sources, the contents of a Value
are the concatenation of all the contents of its sources. As
the value need not be built when get_contents() is called, we
cannot use the actual node.built_value."""
###TODO: something reasonable about universal newlines
contents = str(self.value)
for kid in self.children(None):
contents = contents + kid.get_contents()
return contents
get_contents = get_text_contents ###TODO should return 'bytes' value
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
# No stored csig (never built before) counts as changed.
return 1
def get_csig(self, calc=None):
"""Because we're a Python value node and don't have a real
timestamp, we get to ignore the calculator and just use the
value contents."""
try:
return self.ninfo.csig
except AttributeError:
pass
# Note: the raw contents themselves serve as the signature here
# (unhashed, unlike File csigs).
contents = self.get_contents()
self.get_ninfo().csig = contents
return contents
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import collections
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(collections.UserDict):
    """Global mapping of alias names to Alias nodes."""

    def Alias(self, name, **kw):
        """Return the Alias node for 'name', creating and registering
        it on first use.  An Alias node passed in is returned as-is."""
        if isinstance(name, SCons.Node.Alias.Alias):
            return name
        if name in self.data:
            return self.data[name]
        node = SCons.Node.Alias.Alias(name, **kw)
        self[name] = node
        return node

    def lookup(self, name, **kw):
        """Return the Alias node for 'name', or None if undefined."""
        try:
            return self[name]
        except KeyError:
            return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
"""Node info for Alias nodes; tracks only the content signature."""
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
# Resolve (or create) the Alias in the global namespace.
return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
"""Build info record for Alias nodes (no extra fields)."""
current_version_id = 1
class Alias(SCons.Node.Node):
"""A phony (dummy-name) target node, identified by name rather
than by a file or other source."""
NodeInfo = AliasNodeInfo
BuildInfo = AliasBuildInfo
def __init__(self, name):
SCons.Node.Node.__init__(self)
self.name = name
def str_for_display(self):
return '"' + self.__str__() + '"'
def __str__(self):
return self.name
def make_ready(self):
# Compute the csig up front so dependents can use it.
self.get_csig()
really_build = SCons.Node.Node.build
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Alias nodes get built regardless of
# what directory scons was run from. Alias nodes
# are outside the filesystem:
return 1
def get_contents(self):
"""The contents of an alias is the concatenation
of the content signatures of all its sources."""
childsigs = [n.get_csig() for n in self.children()]
return ''.join(childsigs)
def sconsign(self):
"""An Alias is not recorded in .sconsign files"""
pass
#
#
#
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
# No stored csig (never built before) counts as changed.
return 1
def build(self):
"""A "builder" for aliases."""
# Default no-op; convert() rebinds build to really_build when the
# alias acquires real actions.
pass
def convert(self):
# Turn this alias into a node that really builds: drop any stale
# builder/executor state and expose the base-class build().
try: del self.builder
except AttributeError: pass
self.reset_executor()
self.build = self.really_build
def get_csig(self):
"""
Generate a node's content signature, the digested signature
of its content.
node - the node
cache - alternate node to use for the signature cache
returns - the content signature
"""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
csig = SCons.Util.MD5signature(contents)
self.get_ninfo().csig = csig
return csig
# The global namespace in which all Aliases live.
default_ans = AliasNameSpace()
# Let SCons.Node.arg2nodes() resolve strings to already-defined Aliases.
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Job
This module defines the Serial and Parallel classes that execute tasks to
complete a build. The Jobs class provides a higher level interface to start,
stop, and wait on jobs.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Job.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.compat
import os
import signal
import SCons.Errors
# The default stack size (in kilobytes) of the threads used to execute
# jobs in parallel.
#
# We use a stack size of 256 kilobytes. The default on some platforms
# is too large and prevents us from creating enough threads to fully
# parallelized the build. For example, the default stack size on linux
# is 8 MBytes.
# Stack size (in KB) explicitly requested by the user; None means
# "fall back to default_stack_size".
explicit_stack_size = None
# Default worker-thread stack size, in kilobytes (see comment above).
default_stack_size = 256
# errstr attached to the BuildError raised when a build is interrupted.
interrupt_msg = 'Build interrupted.'
class InterruptState(object):
    """A tiny flag object recording whether the build was interrupted.

    Shared between a job and its signal handler: set() raises the
    flag, calling the instance reports it.
    """

    def __init__(self):
        self.interrupted = False

    def set(self):
        """Mark the build as interrupted."""
        self.interrupted = True

    def __call__(self):
        """Return True when the build has been interrupted."""
        return self.interrupted
class Jobs(object):
"""An instance of this class initializes N jobs, and provides
methods for starting, stopping, and waiting on all N jobs.
"""
def __init__(self, num, taskmaster):
"""
create 'num' jobs using the given taskmaster.
If 'num' is 1 or less, then a serial job will be used,
otherwise a parallel job with 'num' worker threads will
be used.
The 'num_jobs' attribute will be set to the actual number of jobs
allocated. If more than one job is requested but the Parallel
class can't do it, it gets reset to 1. Wrapping interfaces that
care should check the value of 'num_jobs' after initialization.
"""
self.job = None
if num > 1:
stack_size = explicit_stack_size
if stack_size is None:
stack_size = default_stack_size
try:
self.job = Parallel(taskmaster, num, stack_size)
self.num_jobs = num
except NameError:
# Parallel is undefined when the threading imports failed
# (see the try/except around 'import threading' below);
# fall through to a Serial job.
pass
if self.job is None:
self.job = Serial(taskmaster)
self.num_jobs = 1
def run(self, postfunc=lambda: None):
"""Run the jobs.
postfunc() will be invoked after the jobs has run. It will be
invoked even if the jobs are interrupted by a keyboard
interrupt (well, in fact by a signal such as either SIGINT,
SIGTERM or SIGHUP). The execution of postfunc() is protected
against keyboard interrupts and is guaranteed to run to
completion."""
self._setup_sig_handler()
try:
self.job.start()
finally:
# postfunc() and handler restoration run even on interrupt.
postfunc()
self._reset_sig_handler()
def were_interrupted(self):
"""Returns whether the jobs were interrupted by a signal."""
return self.job.interrupted()
def _setup_sig_handler(self):
"""Setup an interrupt handler so that SCons can shutdown cleanly in
various conditions:
a) SIGINT: Keyboard interrupt
b) SIGTERM: kill or system shutdown
c) SIGHUP: Controlling shell exiting
We handle all of these cases by stopping the taskmaster. It
turns out that it very difficult to stop the build process
by throwing asynchronously an exception such as
KeyboardInterrupt. For example, the python Condition
variables (threading.Condition) and queue's do not seem to
asynchronous-exception-safe. It would require adding a whole
bunch of try/finally block and except KeyboardInterrupt all
over the place.
Note also that we have to be careful to handle the case when
SCons forks before executing another process. In that case, we
want the child to exit immediately.
"""
def handler(signum, stack, self=self, parentpid=os.getpid()):
if os.getpid() == parentpid:
self.job.taskmaster.stop()
self.job.interrupted.set()
else:
# We are in a forked child: die immediately, without
# running any SCons cleanup code.
os._exit(2)
self.old_sigint = signal.signal(signal.SIGINT, handler)
self.old_sigterm = signal.signal(signal.SIGTERM, handler)
try:
self.old_sighup = signal.signal(signal.SIGHUP, handler)
except AttributeError:
# signal.SIGHUP does not exist on this platform (e.g. Windows).
pass
def _reset_sig_handler(self):
"""Restore the signal handlers to their previous state (before the
call to _setup_sig_handler()."""
signal.signal(signal.SIGINT, self.old_sigint)
signal.signal(signal.SIGTERM, self.old_sigterm)
try:
signal.signal(signal.SIGHUP, self.old_sighup)
except AttributeError:
pass
class Serial(object):
"""This class is used to execute tasks in series, and is more efficient
than Parallel, but is only appropriate for non-parallel builds. Only
one instance of this class should be in existence at a time.
This class is not thread safe.
"""
def __init__(self, taskmaster):
"""Create a new serial job given a taskmaster.
The taskmaster's next_task() method should return the next task
that needs to be executed, or None if there are no more tasks. The
taskmaster's executed() method will be called for each task when it
is successfully executed or failed() will be called if it failed to
execute (e.g. execute() raised an exception)."""
self.taskmaster = taskmaster
self.interrupted = InterruptState()
def start(self):
"""Start the job. This will begin pulling tasks from the taskmaster
and executing them, and return when there are no more tasks. If a task
fails to execute (i.e. execute() raises an exception), then the job will
stop."""
while True:
task = self.taskmaster.next_task()
if task is None:
break
try:
task.prepare()
if task.needs_execute():
task.execute()
except:
if self.interrupted():
# Convert an interrupt into a BuildError on the current
# target so it is reported like any other failure.
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
else:
task.exception_set()
# Let the failed() callback function arrange for the
# build to stop if that's appropriate.
task.failed()
else:
task.executed()
task.postprocess()
self.taskmaster.cleanup()
# Trap import failure so that everything in the Job module but the
# Parallel class (and its dependent classes) will work if the interpreter
# doesn't support threads.
# NOTE(review): 'queue' is the Python 3 module name; presumably
# SCons.compat (imported above) provides it on Python 2 -- verify.
try:
import queue
import threading
except ImportError:
pass
else:
class Worker(threading.Thread):
"""A worker thread waits on a task to be posted to its request queue,
dequeues the task, executes it, and posts a tuple including the task
and a boolean indicating whether the task executed successfully. """
def __init__(self, requestQueue, resultsQueue, interrupted):
threading.Thread.__init__(self)
# Daemonize so a hung worker cannot keep the process alive.
self.setDaemon(1)
self.requestQueue = requestQueue
self.resultsQueue = resultsQueue
# 'interrupted' is an InterruptState shared with the Parallel job.
self.interrupted = interrupted
# The thread starts consuming tasks as soon as it is constructed.
self.start()
def run(self):
while True:
task = self.requestQueue.get()
if task is None:
# The "None" value is used as a sentinel by
# ThreadPool.cleanup(). This indicates that there
# are no more tasks, so we should quit.
break
try:
if self.interrupted():
# Record the interruption as a task failure so it is
# reported through the normal result path.
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
task.execute()
except:
task.exception_set()
ok = False
else:
ok = True
self.resultsQueue.put((task, ok))
class ThreadPool(object):
"""This class is responsible for spawning and managing worker threads."""
def __init__(self, num, stack_size, interrupted):
"""Create the request and reply queues, and 'num' worker threads.
One must specify the stack size of the worker threads. The
stack size is specified in kilobytes.
"""
self.requestQueue = queue.Queue(0)
self.resultsQueue = queue.Queue(0)
try:
prev_size = threading.stack_size(stack_size*1024)
except AttributeError, e:
# Only print a warning if the stack size has been
# explicitly set.
if not explicit_stack_size is None:
msg = "Setting stack size is unsupported by this version of Python:\n " + \
e.args[0]
# NOTE(review): SCons.Warnings is used here but not imported in
# this module's visible header -- verify it is imported elsewhere.
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
except ValueError, e:
msg = "Setting stack size failed:\n " + str(e)
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
# Create worker threads
self.workers = []
for _ in range(num):
worker = Worker(self.requestQueue, self.resultsQueue, interrupted)
self.workers.append(worker)
# 'prev_size' only exists when stack_size() succeeded above;
# restore it so other thread creators are unaffected.
if 'prev_size' in locals():
threading.stack_size(prev_size)
def put(self, task):
"""Put task into request queue."""
self.requestQueue.put(task)
def get(self):
"""Remove and return a result tuple from the results queue."""
return self.resultsQueue.get()
def preparation_failed(self, task):
# Report a task whose prepare() failed through the normal
# results path, marked unsuccessful.
self.resultsQueue.put((task, False))
def cleanup(self):
"""
Shuts down the thread pool, giving each worker thread a
chance to shut down gracefully.
"""
# For each worker thread, put a sentinel "None" value
# on the requestQueue (indicating that there's no work
# to be done) so that each worker thread will get one and
# terminate gracefully.
for _ in self.workers:
self.requestQueue.put(None)
# Wait for all of the workers to terminate.
#
# If we don't do this, later Python versions (2.4, 2.5) often
# seem to raise exceptions during shutdown. This happens
# in requestQueue.get(), as an assertion failure that
# requestQueue.not_full is notified while not acquired,
# seemingly because the main thread has shut down (or is
# in the process of doing so) while the workers are still
# trying to pull sentinels off the requestQueue.
#
# Normally these terminations should happen fairly quickly,
# but we'll stick a one-second timeout on here just in case
# someone gets hung.
for worker in self.workers:
worker.join(1.0)
self.workers = []
class Parallel(object):
"""This class is used to execute tasks in parallel, and is somewhat
less efficient than Serial, but is appropriate for parallel builds.
This class is thread safe.
"""
def __init__(self, taskmaster, num, stack_size):
"""Create a new parallel job given a taskmaster.
The taskmaster's next_task() method should return the next
task that needs to be executed, or None if there are no more
tasks. The taskmaster's executed() method will be called
for each task when it is successfully executed or failed()
will be called if the task failed to execute (i.e. execute()
raised an exception).
Note: calls to taskmaster are serialized, but calls to
execute() on distinct tasks are not serialized, because
that is the whole point of parallel jobs: they can execute
multiple tasks simultaneously. """
self.taskmaster = taskmaster
self.interrupted = InterruptState()
self.tp = ThreadPool(num, stack_size, self.interrupted)
self.maxjobs = num
def start(self):
"""Start the job. This will begin pulling tasks from the
taskmaster and executing them, and return when there are no
more tasks. If a task fails to execute (i.e. execute() raises
an exception), then the job will stop."""
# 'jobs' counts tasks currently dispatched to the thread pool.
jobs = 0
while True:
# Start up as many available tasks as we're
# allowed to.
while jobs < self.maxjobs:
task = self.taskmaster.next_task()
if task is None:
break
try:
# prepare task for execution
task.prepare()
except:
task.exception_set()
task.failed()
task.postprocess()
else:
if task.needs_execute():
# dispatch task
self.tp.put(task)
jobs = jobs + 1
else:
task.executed()
task.postprocess()
# No new task available and nothing in flight: build done.
if not task and not jobs: break
# Let any/all completed tasks finish up before we go
# back and put the next batch of tasks on the queue.
while True:
task, ok = self.tp.get()
jobs = jobs - 1
if ok:
task.executed()
else:
if self.interrupted():
# Convert an interrupt into a BuildError on the
# current target so it is reported like a failure.
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
# Let the failed() callback function arrange
# for the build to stop if that's appropriate.
task.failed()
task.postprocess()
if self.tp.resultsQueue.empty():
break
self.tp.cleanup()
self.taskmaster.cleanup()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Conftest
Autoconf-like configuration support; low level implementation of tests.
"""
#
# Copyright (c) 2003 Stichting NLnet Labs
# Copyright (c) 2001, 2002, 2003 Steven Knight
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# The purpose of this module is to define how a check is to be performed.
# Use one of the Check...() functions below.
#
#
# A context class is used that defines functions for carrying out the tests,
# logging and messages. The following methods and members must be present:
#
# context.Display(msg) Function called to print messages that are normally
# displayed for the user. Newlines are explicitly used.
# The text should also be written to the logfile!
#
# context.Log(msg) Function called to write to a log file.
#
# context.BuildProg(text, ext)
# Function called to build a program, using "ext" for the
#                       file extension. Must return an empty string for
# success, an error message for failure.
# For reliable test results building should be done just
#                       like an actual program would be built, using the same
# command and arguments (including configure results so
# far).
#
# context.CompileProg(text, ext)
# Function called to compile a program, using "ext" for
#                       the file extension. Must return an empty string for
# success, an error message for failure.
# For reliable test results compiling should be done just
# like an actual source file would be compiled, using the
# same command and arguments (including configure results
# so far).
#
# context.AppendLIBS(lib_name_list)
# Append "lib_name_list" to the value of LIBS.
# "lib_namelist" is a list of strings.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.PrependLIBS(lib_name_list)
# Prepend "lib_name_list" to the value of LIBS.
# "lib_namelist" is a list of strings.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.SetLIBS(value)
# Set LIBS to "value". The type of "value" is what
# AppendLIBS() returned.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.headerfilename
# Name of file to append configure results to, usually
# "confdefs.h".
# The file must not exist or be empty when starting.
# Empty or None to skip this (some tests will not work!).
#
# context.config_h (may be missing). If present, must be a string, which
# will be filled with the contents of a config_h file.
#
# context.vardict Dictionary holding variables used for the tests and
# stores results from the tests, used for the build
# commands.
# Normally contains "CC", "LIBS", "CPPFLAGS", etc.
#
# context.havedict Dictionary holding results from the tests that are to
# be used inside a program.
# Names often start with "HAVE_". These are zero
# (feature not present) or one (feature present). Other
# variables may have any value, e.g., "PERLVERSION" can
# be a number and "SYSTEMNAME" a string.
#
import re
from types import IntType
#
# PUBLIC VARIABLES
#
# Module-level switches; callers may reset these to suppress logging.
LogInputFiles = 1    # Set that to log the input files in case of a failed test
LogErrorMessages = 1 # Set that to log Conftest-generated error messages
#
# PUBLIC FUNCTIONS
#
# Generic remarks:
# - When a language is specified which is not supported the test fails. The
# message is a bit different, because not all the arguments for the normal
# message are available yet (chicken-egg problem).
def CheckBuilder(context, text = None, language = None):
    """
    Configure check to see if the compiler works.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    "text" may be used to specify the code to be build.
    Returns an empty string for success, an error message for failure.
    """
    lang, suffix, err = _lang2suffix(language)
    if err:
        context.Display("%s\n" % err)
        return err

    # Fall back to a minimal program when the caller supplied no source.
    source = text
    if not source:
        source = """
int main() {
    return 0;
}
"""

    context.Display("Checking if building a %s file works... " % lang)
    status = context.BuildProg(source, suffix)
    _YesNoResult(context, status, None, source)
    return status
def CheckCC(context):
    """
    Configure check for a working C compiler.

    This checks whether the C compiler, as defined in the $CC construction
    variable, can compile a C source file. It uses the current $CCCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the C compiler works")
    # A minimal complete program is enough to exercise $CC/$CCCOM.
    prog = """
int main()
{
    return 0;
}
"""
    status = _check_empty_program(context, 'CC', prog, 'C')
    _YesNoResult(context, status, None, prog)
    return status
def CheckSHCC(context):
    """
    Configure check for a working shared C compiler.

    This checks whether the C compiler, as defined in the $SHCC construction
    variable, can compile a C source file. It uses the current $SHCCCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the (shared) C compiler works")
    # A free function (not main) is enough for a shared-object compile.
    prog = """
int foo()
{
    return 0;
}
"""
    status = _check_empty_program(context, 'SHCC', prog, 'C', use_shared = True)
    _YesNoResult(context, status, None, prog)
    return status
def CheckCXX(context):
    """
    Configure check for a working CXX compiler.

    This checks whether the CXX compiler, as defined in the $CXX construction
    variable, can compile a CXX source file. It uses the current $CXXCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the C++ compiler works")
    prog = """
int main()
{
    return 0;
}
"""
    status = _check_empty_program(context, 'CXX', prog, 'C++')
    _YesNoResult(context, status, None, prog)
    return status
def CheckSHCXX(context):
    """
    Configure check for a working shared CXX compiler.

    This checks whether the CXX compiler, as defined in the $SHCXX construction
    variable, can compile a CXX source file. It uses the current $SHCXXCOM value
    too, so that it can test against non working flags.
    """
    context.Display("Checking whether the (shared) C++ compiler works")
    prog = """
int main()
{
    return 0;
}
"""
    status = _check_empty_program(context, 'SHCXX', prog, 'C++', use_shared = True)
    _YesNoResult(context, status, None, prog)
    return status
def _check_empty_program(context, comp, text, language, use_shared = False):
    """Compile the trivial program "text" with the compiler named by the
    construction variable "comp" ('CC', 'CXX', ...).

    Return 0 on success, 1 otherwise.
    """
    # The compiler construction variable must exist and be non-empty.
    have_comp = comp in context.env and context.env[comp]
    if not have_comp:
        return 1

    _, suffix, msg = _lang2suffix(language)
    if msg:
        # Unsupported language.
        return 1

    if use_shared:
        return context.CompileSharedObject(text, suffix)
    return context.CompileProg(text, suffix)
def CheckFunc(context, function_name, header = None, language = None):
    """
    Configure check for a function "function_name".
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Optional "header" can be defined to define a function prototype, include a
    header file or anything else that comes before main().
    Sets HAVE_function_name in context.havedict according to the result.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    Returns an empty string for success, an error message for failure.
    """

    # Remarks from autoconf:
    # - Don't include <ctype.h> because on OSF/1 3.0 it includes <sys/types.h>
    #   which includes <sys/select.h> which contains a prototype for select.
    #   Similarly for bzero.
    # - assert.h is included to define __stub macros and hopefully few
    #   prototypes, which can conflict with char $1(); below.
    # - Override any gcc2 internal prototype to avoid an error.
    # - We use char for the function declaration because int might match the
    #   return type of a gcc2 builtin and then its argument prototype would
    #   still apply.
    # - The GNU C library defines this for functions which it implements to
    #   always fail with ENOSYS.  Some functions are actually named something
    #   starting with __ and the normal name is an alias.

    # Include "confdefs.h" first, so the accumulated results so far are
    # visible to the test program.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        # Default prototype; "char" deliberately (see autoconf remarks above).
        header = """
#ifdef __cplusplus
extern "C"
#endif
char %s();""" % function_name

    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for %s(): %s\n" % (function_name, msg))
        return msg

    text = """
%(include)s
#include <assert.h>
%(hdr)s

int main() {
#if defined (__stub_%(name)s) || defined (__stub___%(name)s)
  fail fail fail
#else
  %(name)s();
#endif

  return 0;
}
""" % { 'name': function_name,
        'include': includetext,
        'hdr': header }

    context.Display("Checking for %s function %s()... " % (lang, function_name))
    ret = context.BuildProg(text, suffix)
    _YesNoResult(context, ret, "HAVE_" + function_name, text,
                 "Define to 1 if the system has the function `%s'." %\
                 function_name)
    return ret
def CheckHeader(context, header_name, header = None, language = None,
                include_quotes = None):
    """
    Configure check for a C or C++ header file "header_name".
    Optional "header" can be defined to do something before including the
    header file (unusual, supported for consistency).
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Sets HAVE_header_name in context.havedict according to the result.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS and $CPPFLAGS are set correctly.
    Returns an empty string for success, an error message for failure.
    """
    # Why compile the program instead of just running the preprocessor?
    # It is possible that the header file exists, but actually using it may
    # fail (e.g., because it depends on other header files).  Thus this test
    # is more strict.  It may require using the "header" argument.
    #
    # Use <> by default, because the check is normally used for system header
    # files.  SCons passes '""' to overrule this.

    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    prologue = ''
    if context.headerfilename:
        prologue = '#include "%s"\n' % context.headerfilename
    if not header:
        header = ""

    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for header file %s: %s\n"
                        % (header_name, msg))
        return msg

    if not include_quotes:
        include_quotes = "<>"

    src = "%s%s\n#include %s%s%s\n\n" % (prologue, header,
                include_quotes[0], header_name, include_quotes[1])

    context.Display("Checking for %s header file %s... " % (lang, header_name))
    ret = context.CompileProg(src, suffix)
    _YesNoResult(context, ret, "HAVE_" + header_name, src,
                 "Define to 1 if you have the <%s> header file." % header_name)
    return ret
def CheckType(context, type_name, fallback = None,
              header = None, language = None):
    """
    Configure check for a C or C++ type "type_name".
    Optional "header" can be defined to include a header file.
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Sets HAVE_type_name in context.havedict according to the result.
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    Returns an empty string for success, an error message for failure.
    """

    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        header = ""

    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for %s type: %s\n" % (type_name, msg))
        return msg

    # Remarks from autoconf about this test:
    # - Grepping for the type in include files is not reliable (grep isn't
    #   portable anyway).
    # - Using "TYPE my_var;" doesn't work for const qualified types in C++.
    #   Adding an initializer is not valid for some C++ classes.
    # - Using the type as parameter to a function either fails for K&R C or
    #   for C++.
    # - Using "TYPE *my_var;" is valid in C for some types that are not
    #   declared (struct something).
    # - Using "sizeof(TYPE)" is valid when TYPE is actually a variable.
    # - Using the previous two together works reliably.
    text = """
%(include)s
%(header)s

int main() {
  if ((%(name)s *) 0)
    return 0;
  if (sizeof (%(name)s))
    return 0;
}
""" % { 'include': includetext,
        'header': header,
        'name': type_name }

    context.Display("Checking for %s type %s... " % (lang, type_name))
    ret = context.BuildProg(text, suffix)
    _YesNoResult(context, ret, "HAVE_" + type_name, text,
                 "Define to 1 if the system has the type `%s'." % type_name)
    # When the type is absent but a fallback was given, write a typedef into
    # the configure header so dependent code can still compile.
    if ret and fallback and context.headerfilename:
        f = open(context.headerfilename, "a")
        f.write("typedef %s %s;\n" % (fallback, type_name))
        f.close()

    return ret
def CheckTypeSize(context, type_name, header = None, language = None, expect = None):
    """This check can be used to get the size of a given type, or to check whether
    the type is of expected size.

    Arguments:
        - type_name : str
            the type to check
        - header : str
            code to include in the test program before testing the type
            (typically #include lines)
        - language : str
            'C' or 'C++'
        - expect : int
            if given, will test whether the type has the given number of bytes.
            If not given, will automatically find the size.

        Returns:
            status : int
                0 if the check failed, or the found size of the type if the check succeeded."""

    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''

    if not header:
        header = ""

    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for %s type: %s\n" % (type_name, msg))
        return msg

    src = includetext + header
    if not expect is None:
        # Only check if the given size is the right one
        context.Display('Checking %s is %d bytes... ' % (type_name, expect))

        # test code taken from autoconf: this is a pretty clever hack to find that
        # a type is of a given size using only compilation. This speeds things up
        # quite a bit compared to straightforward code using TryRun
        src = src + r"""
typedef %s scons_check_type;

int main()
{
    static int test_array[1 - 2 * !(((long int) (sizeof(scons_check_type))) == %d)];
    test_array[0] = 0;

    return 0;
}
"""

        st = context.CompileProg(src % (type_name, expect), suffix)
        if not st:
            context.Display("yes\n")
            _Have(context, "SIZEOF_%s" % type_name, expect,
                  "The size of `%s', as computed by sizeof." % type_name)
            return expect
        else:
            context.Display("no\n")
            _LogFailed(context, src, st)
            return 0
    else:
        # Determine the size by compiling and running a test program that
        # prints sizeof(type).
        # NOTE(review): every other check in this module reports through
        # context.Display(); Message() is not part of the documented context
        # interface at the top of this file -- confirm this is intended.
        context.Message('Checking size of %s ... ' % type_name)

        # We have to be careful with the program we wish to test here since
        # compilation will be attempted using the current environment's flags.
        # So make sure that the program will compile without any warning. For
        # example using: 'int main(int argc, char** argv)' will fail with the
        # '-Wall -Werror' flags since the variables argc and argv would not be
        # used in the program...
        #
        src = src + """
#include <stdlib.h>
#include <stdio.h>
int main() {
    printf("%d", (int)sizeof(""" + type_name + """));
    return 0;
}
    """
        st, out = context.RunProg(src, suffix)
        try:
            size = int(out)
        except ValueError:
            # If cannot convert output of test prog to an integer (the size),
            # something went wrong, so just fail
            st = 1
            size = 0

        if not st:
            context.Display("yes\n")
            _Have(context, "SIZEOF_%s" % type_name, size,
                  "The size of `%s', as computed by sizeof." % type_name)
            return size
        else:
            context.Display("no\n")
            _LogFailed(context, src, st)
            return 0

    return 0
def CheckDeclaration(context, symbol, includes = None, language = None):
    """Checks whether symbol is declared.

    Uses the same test as autoconf: the symbol must either be defined as a
    macro or be usable as an r-value.

    Arguments:
        symbol : str
            the symbol to check
        includes : str
            Optional "header" can be defined to include a header file.
        language : str
            only C and C++ supported.

    Returns:
        status : bool
            True if the check failed, False if succeeded."""
    lang, suffix, msg = _lang2suffix(language)
    if msg:
        context.Display("Cannot check for declaration %s: %s\n" % (symbol, msg))
        return msg

    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    prologue = ''
    if context.headerfilename:
        prologue = '#include "%s"' % context.headerfilename
    src = prologue + (includes or "")

    context.Display('Checking whether %s is declared... ' % symbol)

    src = src + r"""
int main()
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}
""" % (symbol, symbol)

    st = context.CompileProg(src, suffix)
    _YesNoResult(context, st, "HAVE_DECL_" + symbol, src,
                 "Set to 1 if %s is defined." % symbol)
    return st
def CheckLib(context, libs, func_name = None, header = None,
             extra_libs = None, call = None, language = None, autoadd = 1,
             append = True):
    """
    Configure check for a C or C++ libraries "libs".  Searches through
    the list of libraries, until one is found where the test succeeds.
    Tests if "func_name" or "call" exists in the library.  Note: if it exists
    in another library the test succeeds anyway!
    Optional "header" can be defined to include a header file.  If not given a
    default prototype for "func_name" is added.
    Optional "extra_libs" is a list of library names to be added after
    "lib_name" in the build command.  To be used for libraries that "lib_name"
    depends on.
    Optional "call" replaces the call to "func_name" in the test code.  It must
    consist of complete C statements, including a trailing ";".
    Both "func_name" and "call" arguments are optional, and in that case, just
    linking against the libs is tested.
    "language" should be "C" or "C++" and is used to select the compiler.
    Default is "C".
    Note that this uses the current value of compiler and linker flags, make
    sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
    Returns an empty string for success, an error message for failure.
    """
    # Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
    if context.headerfilename:
        includetext = '#include "%s"' % context.headerfilename
    else:
        includetext = ''
    if not header:
        header = ""

    text = """
%s
%s""" % (includetext, header)

    # Add a function declaration if needed.
    if func_name and func_name != "main":
        if not header:
            text = text + """
#ifdef __cplusplus
extern "C"
#endif
char %s();
""" % func_name

        # The actual test code.
        # (Deliberately nested under the func_name check: with no function
        # to test, "call" must stay None so main() is left blank below.)
        if not call:
            call = "%s();" % func_name

    # if no function to test, leave main() blank
    text = text + """
int
main() {
  %s
return 0;
}
""" % (call or "")

    # Build a short human-readable form of the call for the progress message.
    if call:
        i = call.find("\n")
        if i > 0:
            calltext = call[:i] + ".."
        elif call[-1] == ';':
            calltext = call[:-1]
        else:
            calltext = call

    for lib_name in libs:

        lang, suffix, msg = _lang2suffix(language)
        if msg:
            context.Display("Cannot check for library %s: %s\n" % (lib_name, msg))
            return msg

        # if a function was specified to run in main(), say it
        if call:
            context.Display("Checking for %s in %s library %s... "
                            % (calltext, lang, lib_name))
        # otherwise, just say the name of library and language
        else:
            context.Display("Checking for %s library %s... "
                            % (lang, lib_name))

        if lib_name:
            l = [ lib_name ]
            if extra_libs:
                l.extend(extra_libs)
            if append:
                oldLIBS = context.AppendLIBS(l)
            else:
                oldLIBS = context.PrependLIBS(l)
            sym = "HAVE_LIB" + lib_name
        else:
            # Empty lib name: test linking with the current LIBS only.
            # -1 is a sentinel meaning "LIBS was not modified".
            oldLIBS = -1
            sym = None

        ret = context.BuildProg(text, suffix)

        _YesNoResult(context, ret, sym, text,
                     "Define to 1 if you have the `%s' library." % lib_name)
        # Restore LIBS when the test failed, or when the caller asked us not
        # to add the library automatically.
        if oldLIBS != -1 and (ret or not autoadd):
            context.SetLIBS(oldLIBS)

        if not ret:
            return ret

    return ret
#
# END OF PUBLIC FUNCTIONS
#
def _YesNoResult(context, ret, key, text, comment = None):
    """
    Handle the result of a test with a "yes" or "no" result.

    "ret" is the return value: empty if OK, error message when not.
    "key" is the name of the symbol to be defined (HAVE_foo).
    "text" is the source code of the program used for testing.
    "comment" is the C comment to add above the line defining the symbol (the
    comment is automatically put inside a /* */). If None, no comment is added.
    """
    if key:
        # Record the (boolean) outcome under the HAVE_* symbol.
        _Have(context, key, not ret, comment)

    if not ret:
        context.Display("yes\n")
        return

    context.Display("no\n")
    _LogFailed(context, text, ret)
def _Have(context, key, have, comment = None):
"""
Store result of a test in context.havedict and context.headerfilename.
"key" is a "HAVE_abc" name. It is turned into all CAPITALS and non-
alphanumerics are replaced by an underscore.
The value of "have" can be:
1 - Feature is defined, add "#define key".
0 - Feature is not defined, add "/* #undef key */".
Adding "undef" is what autoconf does. Not useful for the
compiler, but it shows that the test was done.
number - Feature is defined to this number "#define key have".
Doesn't work for 0 or 1, use a string then.
string - Feature is defined to this string "#define key have".
Give "have" as is should appear in the header file, include quotes
when desired and escape special characters!
"""
key_up = key.upper()
key_up = re.sub('[^A-Z0-9_]', '_', key_up)
context.havedict[key_up] = have
if have == 1:
line = "#define %s 1\n" % key_up
elif have == 0:
line = "/* #undef %s */\n" % key_up
elif isinstance(have, IntType):
line = "#define %s %d\n" % (key_up, have)
else:
line = "#define %s %s\n" % (key_up, str(have))
if comment is not None:
lines = "\n/* %s */\n" % comment + line
else:
lines = "\n" + line
if context.headerfilename:
f = open(context.headerfilename, "a")
f.write(lines)
f.close()
elif hasattr(context,'config_h'):
context.config_h = context.config_h + lines
def _LogFailed(context, text, msg):
    """
    Write to the log about a failed program.

    The source is logged with line numbers prepended, so that compiler
    error messages (which reference line numbers) can be understood.
    """
    if LogInputFiles:
        context.Log("Failed program was:\n")
        lines = text.split('\n')
        # A trailing newline in "text" leaves an empty final element; drop it.
        if len(lines) and lines[-1] == '':
            lines = lines[:-1]
        num = 0
        for line in lines:
            num += 1
            context.Log("%d: %s\n" % (num, line))
    if LogErrorMessages:
        context.Log("Error message: %s\n" % msg)
def _lang2suffix(lang):
"""
Convert a language name to a suffix.
When "lang" is empty or None C is assumed.
Returns a tuple (lang, suffix, None) when it works.
For an unrecognized language returns (None, None, msg).
Where:
lang = the unified language name
suffix = the suffix, including the leading dot
msg = an error message
"""
if not lang or lang in ["C", "c"]:
return ("C", ".c", None)
if lang in ["c++", "C++", "cpp", "CXX", "cxx"]:
return ("C++", ".cpp", None)
return None, None, "Unsupported language: %s" % lang
# vim: set sw=4 et sts=4 tw=79 fo+=l:
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.as
Tool-specific initialization for as, the generic Posix assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/as.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
# Candidate assembler executables, in order of preference.
assemblers = ['as']

# Plain assembler sources vs. sources run through the C preprocessor first.
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
# On case-sensitive file systems '.S' conventionally means "preprocess
# before assembling"; on case-insensitive systems it is the same as '.s'.
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
    ASPPSuffixes.extend(['.S'])
else:
    ASSuffixes.extend(['.S'])
def generate(env):
    """Add Builders and construction variables for as to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Register every suffix with the appropriate action: plain assembler
    # sources use ASAction, preprocessed sources use ASPPAction.  Both
    # groups share the standard object emitters.
    for suffixes, action in ((ASSuffixes, SCons.Defaults.ASAction),
                             (ASPPSuffixes, SCons.Defaults.ASPPAction)):
        for suffix in suffixes:
            static_obj.add_action(suffix, action)
            shared_obj.add_action(suffix, action)
            static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
            shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    env['AS']        = env.Detect(assemblers) or 'as'
    env['ASFLAGS']   = SCons.Util.CLVar('')
    env['ASCOM']     = '$AS $ASFLAGS -o $TARGET $SOURCES'
    env['ASPPFLAGS'] = '$ASFLAGS'
    env['ASPPCOM']   = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
def exists(env):
    """Return a true value if one of the known assemblers can be found."""
    return env.Detect(assemblers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""engine.SCons.Tool.msvc
Tool-specific initialization for Microsoft Visual C/C++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/msvc.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os.path
import re
import sys
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvs
import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
from MSCommon import msvc_exists, msvc_setup_env_once
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def validate_vars(env):
    """Validate the PCH and PCHSTOP construction variables.

    When $PCH is set, $PCHSTOP must also be set and must be a string;
    raises SCons.Errors.UserError otherwise.
    """
    # Nothing to validate unless a precompiled header is configured.
    if 'PCH' not in env or not env['PCH']:
        return
    if 'PCHSTOP' not in env:
        raise SCons.Errors.UserError("The PCHSTOP construction must be defined if PCH is defined.")
    if not SCons.Util.is_String(env['PCHSTOP']):
        raise SCons.Errors.UserError("The PCHSTOP construction variable must be a string: %r"%env['PCHSTOP'])
def pch_emitter(target, source, env):
    """Adds the object file target.

    Ensures the target list is exactly [pch, obj]; the companion .obj is
    synthesized from the .pch name when the caller did not supply one.
    """
    validate_vars(env)

    pch = None
    obj = None

    for t in target:
        ext = SCons.Util.splitext(str(t))[1]
        if ext == '.pch':
            pch = t
        if ext == '.obj':
            obj = t

    if not obj:
        obj = SCons.Util.splitext(str(pch))[0] + '.obj'

    # pch must be first, and obj second for the PCHCOM to work
    return ([pch, obj], source)
def object_emitter(target, source, env, parent_emitter):
    """Sets up the PCH dependencies for an object file.

    Runs "parent_emitter" (the standard static or shared object emitter)
    first, then makes the object depend on $PCH when that variable is set.
    """
    validate_vars(env)

    parent_emitter(target, source, env)

    # Add a dependency, but only if the target (e.g. 'Source1.obj')
    # doesn't correspond to the pre-compiled header ('Source1.pch').
    # If the basenames match, then this was most likely caused by
    # someone adding the source file to both the env.PCH() and the
    # env.Program() calls, and adding the explicit dependency would
    # cause a cycle on the .pch file itself.
    #
    # See issue #2505 for a discussion of what to do if it turns
    # out this assumption causes trouble in the wild:
    # http://scons.tigris.org/issues/show_bug.cgi?id=2505
    if 'PCH' in env:
        pch = env['PCH']
        if str(target[0]) != SCons.Util.splitext(str(pch))[0] + '.obj':
            env.Depends(target, pch)

    return (target, source)
def static_object_emitter(target, source, env):
    """Static-object emitter: the default emitter plus PCH dependency setup."""
    # NOTE(review): SCons.Defaults is not in this module's import list; it is
    # only reachable because another SCons module imports it first -- confirm.
    return object_emitter(target, source, env,
                          SCons.Defaults.StaticObjectEmitter)
def shared_object_emitter(target, source, env):
    """Emitter for shared objects; adds the PCH dependency via object_emitter."""
    return object_emitter(target, source, env,
                          SCons.Defaults.SharedObjectEmitter)
# Action/Builder pair for building precompiled headers (.pch files).
pch_action = SCons.Action.Action('$PCHCOM', '$PCHCOMSTR')
pch_builder = SCons.Builder.Builder(action=pch_action, suffix='.pch',
                                    emitter=pch_emitter,
                                    source_scanner=SCons.Tool.SourceFileScanner)
# Logic to build .rc files into .res files (resource files)
res_scanner = SCons.Scanner.RC.RCScan()
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action,
                                    src_suffix='.rc',
                                    suffix='.res',
                                    src_builder=[],
                                    source_scanner=res_scanner)
def msvc_batch_key(action, env, target, source):
    """
    Returns a key to identify unique batches of sources for compilation.

    If batching is enabled (via the $MSVC_BATCH setting), then all
    target+source pairs that use the same action, defined by the same
    environment, and have the same target and source directories, will
    be batched.

    Returning None specifies that the specified target+source should not
    be batched with other compilations.
    """
    # $MSVC_BATCH may itself be a reference to another construction
    # variable, hence the env.subst(); the comparison includes the string
    # forms '0'/'False' so that an explicit MSVC_BATCH=False disables
    # batching (a plain truth test on the raw value did not).
    if 'MSVC_BATCH' not in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
        # Batching disabled; no key.
        return None
    tgt = target[0]
    src = source[0]
    if os.path.splitext(tgt.name)[0] != os.path.splitext(src.name)[0]:
        # Different base names force a separate compilation; no key.
        return None
    return (id(action), id(env), tgt.dir, src.dir)
def msvc_output_flag(target, source, env, for_signature):
    """
    Returns the correct /Fo flag for batching.

    If batching is disabled, we return an /Fo string that specifies the
    target explicitly.  Otherwise, we return an /Fo string that just
    specifies the first target's directory (where the Visual C/C++
    compiler will put the .obj files).  The single-source case is treated
    the same as the multi-source case so that batch mode also works when
    only one changed file is being compiled.
    """
    # Mirror the $MSVC_BATCH test used by msvc_batch_key(): subst() the
    # value and treat '0'/'False'/''/None as "disabled".
    batching = ('MSVC_BATCH' in env
                and env.subst('$MSVC_BATCH') not in ('0', 'False', '', None))
    if not batching:
        return '/Fo$TARGET'
    # The Visual C/C++ compiler requires a trailing path separator on /Fo
    # to indicate an output directory.  os.sep is used (instead of a
    # hard-coded backslash) so tests can run on non-Windows systems
    # without the backslash confusing command-line argument parsing.
    return '/Fo${TARGET.dir}' + os.sep
# Compile actions for static (CAction/CXXAction) and shared
# (ShCAction/ShCXXAction) objects.  All share msvc_batch_key so compatible
# compilations can be batched, and rebuild only $CHANGED_TARGETS.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR",
                              batch_key=msvc_batch_key,
                              targets='$CHANGED_TARGETS')
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR",
                                  batch_key=msvc_batch_key,
                                  targets='$CHANGED_TARGETS')
def generate(env):
    """Add Builders and construction variables for MSVC++ to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    # TODO(batch): shouldn't reach in to cmdgen this way; necessary
    # for now to bypass the checks in Builder.DictCmdGenerator.__call__()
    # and allow .cc and .cpp to be compiled in the same command line.
    static_obj.cmdgen.source_ext_match = False
    shared_obj.cmdgen.source_ext_match = False
    # Register compile actions and PCH-aware emitters per suffix.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, CAction)
        shared_obj.add_action(suffix, ShCAction)
        static_obj.add_emitter(suffix, static_object_emitter)
        shared_obj.add_emitter(suffix, shared_object_emitter)
    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, CXXAction)
        shared_obj.add_action(suffix, ShCXXAction)
        static_obj.add_emitter(suffix, static_object_emitter)
        shared_obj.add_emitter(suffix, shared_object_emitter)
    # /Z7 embeds debug info when $PDB is set; /Yu+/Fp consume the PCH.
    env['CCPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Z7") or ""}'])
    env['CCPCHFLAGS'] = SCons.Util.CLVar(['${(PCH and "/Yu%s \\\"/Fp%s\\\""%(PCHSTOP or "",File(PCH))) or ""}'])
    env['_MSVC_OUTPUT_FLAG'] = msvc_output_flag
    env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $CCPCHFLAGS $CCPDBFLAGS'
    # C and C++ compile command lines (cl.exe); TEMPFILE works around
    # Windows command-line length limits.
    env['CC'] = 'cl'
    env['CCFLAGS'] = SCons.Util.CLVar('/nologo')
    env['CFLAGS'] = SCons.Util.CLVar('')
    env['CCCOM'] = '${TEMPFILE("$CC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CFLAGS $CCFLAGS $_CCCOMCOM")}'
    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
    env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
    env['SHCCCOM'] = '${TEMPFILE("$SHCC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCFLAGS $SHCCFLAGS $_CCCOMCOM")}'
    env['CXX'] = '$CC'
    # /TP forces cl to treat sources as C++; $( $) excludes it from signatures.
    env['CXXFLAGS'] = SCons.Util.CLVar('$( /TP $)')
    env['CXXCOM'] = '${TEMPFILE("$CXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CXXFLAGS $CCFLAGS $_CCCOMCOM")}'
    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
    env['SHCXXCOM'] = '${TEMPFILE("$SHCXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM")}'
    env['CPPDEFPREFIX'] = '/D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '/I'
    env['INCSUFFIX'] = ''
#    env.Append(OBJEMITTER = [static_object_emitter])
#    env.Append(SHOBJEMITTER = [shared_object_emitter])
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
    # Resource compiler (rc.exe) support: .rc/.rc2 -> .res.
    env['RC'] = 'rc'
    env['RCFLAGS'] = SCons.Util.CLVar('')
    env['RCSUFFIXES']=['.rc','.rc2']
    env['RCCOM'] = '$RC $_CPPDEFFLAGS $_CPPINCFLAGS $RCFLAGS /fo$TARGET $SOURCES'
    env['BUILDERS']['RES'] = res_builder
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.obj'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'
    # Set-up ms tools paths
    msvc_setup_env_once(env)
    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cc'
    # Precompiled-header builder: /Yc creates the PCH, targets[0] is the
    # .pch and targets[1] the companion .obj (order fixed by pch_emitter).
    env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
    env['PCHCOM'] = '$CXX /Fo${TARGETS[1]} $CXXFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Yc$PCHSTOP /Fp${TARGETS[0]} $CCPDBFLAGS $PCHPDBFLAGS'
    env['BUILDERS']['PCH'] = pch_builder
    if 'ENV' not in env:
        env['ENV'] = {}
    if 'SystemRoot' not in env['ENV']:    # required for dlls in the winsxs folders
        env['ENV']['SystemRoot'] = SCons.Platform.win32.get_system_root()
def exists(env):
    """Return a true value if an MSVC installation can be found."""
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgicc.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import cc
def generate(env):
    """Add Builders and construction variables for SGI MIPSPro cc to an
    Environment."""
    cc.generate(env)
    # The MIPSPro C++ compiler driver is named 'CC'.
    env['CXX'] = 'CC'
    # Shared objects use the plain .o suffix on this toolchain, and static
    # and shared objects are interchangeable.
    env['SHOBJSUFFIX'] = '.o'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
    """Return a path to the 'cc' compiler if it can be detected."""
    return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.GettextCommon module
Used by several tools of `gettext` toolset.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Warnings
import re
#############################################################################
# Warning hierarchy for the gettext tool-chain: each *ToolWarning is the
# base class for problems with that program, and *NotFound is used when the
# program cannot be detected.
class XgettextToolWarning(SCons.Warnings.Warning): pass
class XgettextNotFound(XgettextToolWarning): pass
class MsginitToolWarning(SCons.Warnings.Warning): pass
class MsginitNotFound(MsginitToolWarning): pass
class MsgmergeToolWarning(SCons.Warnings.Warning): pass
class MsgmergeNotFound(MsgmergeToolWarning): pass
class MsgfmtToolWarning(SCons.Warnings.Warning): pass
class MsgfmtNotFound(MsgfmtToolWarning): pass
#############################################################################
# Enable all gettext-related warning classes by default.
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
""" A factory of `PO` target files.
Factory defaults differ from these of `SCons.Node.FS.FS`. We set `precious`
(this is required by builders and actions gettext) and `noclean` flags by
default for all produced nodes.
"""
def __init__( self, env, nodefault = True, alias = None, precious = True
, noclean = True ):
""" Object constructor.
**Arguments**
- *env* (`SCons.Environment.Environment`)
- *nodefault* (`boolean`) - if `True`, produced nodes will be ignored
from default target `'.'`
- *alias* (`string`) - if provided, produced nodes will be automatically
added to this alias, and alias will be set as `AlwaysBuild`
- *precious* (`boolean`) - if `True`, the produced nodes will be set as
`Precious`.
- *noclen* (`boolean`) - if `True`, the produced nodes will be excluded
from `Clean`.
"""
self.env = env
self.alias = alias
self.precious = precious
self.noclean = noclean
self.nodefault = nodefault
def _create_node(self, name, factory, directory = None, create = 1):
""" Create node, and set it up to factory settings. """
import SCons.Util
node = factory(name, directory, create)
node.set_noclean(self.noclean)
node.set_precious(self.precious)
if self.nodefault:
self.env.Ignore('.', node)
if self.alias:
self.env.AlwaysBuild(self.env.Alias(self.alias, node))
return node
def Entry(self, name, directory = None, create = 1):
""" Create `SCons.Node.FS.Entry` """
return self._create_node(name, self.env.fs.Entry, directory, create)
def File(self, name, directory = None, create = 1):
""" Create `SCons.Node.FS.File` """
return self._create_node(name, self.env.fs.File, directory, create)
#############################################################################
#############################################################################
# Matches a '#...' comment to end-of-line (stripped from LINGUAS files).
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)
# Matches one language identifier token, e.g. 'en' or 'pt_BR'.
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)
#############################################################################
def _read_linguas_from_files(env, linguas_files = None):
    """ Parse `LINGUAS` file and return list of extracted languages.

    *linguas_files* may be a file name, a list of file names, a Node, or a
    plain truthy value (in which case the conventional 'LINGUAS' file name
    is used).  Returns [] when *linguas_files* is None.
    """
    import SCons.Util
    import SCons.Environment
    # Explicitly import SCons.Node.FS: the isinstance() check below needs
    # it, and previously this relied on it being imported transitively.
    import SCons.Node.FS
    global _re_comment
    global _re_lang
    if not SCons.Util.is_List(linguas_files) \
    and not SCons.Util.is_String(linguas_files) \
    and not isinstance(linguas_files, SCons.Node.FS.Base) \
    and linguas_files:
        # If, linguas_files==True or such, then read 'LINGUAS' file.
        linguas_files = [ 'LINGUAS' ]
    if linguas_files is None:
        return []
    fnodes = env.arg2nodes(linguas_files)
    linguas = []
    for fnode in fnodes:
        # Strip comments, then collect every language token in the file.
        contents = _re_comment.sub("", fnode.get_text_contents())
        ls = [ l for l in _re_lang.findall(contents) if l ]
        linguas.extend(ls)
    return linguas
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
    """ `PO` file builder.

    This is multi-target single-source builder. In typical situation the source
    is single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
    targets to be updated from this `POT`. We must run
    `SCons.Builder.BuilderBase._execute()` separately for each target to track
    dependencies separately for each target file.

    **NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
    with target being list of all targets, all targets would be rebuilt each time
    one of the targets from this list is missing. This would happen, for example,
    when new language `ll` enters `LINGUAS_FILE` (at this moment there is no
    `ll.po` file yet). To avoid this, we override
    `SCons.Builder.BuilderBase._execute()` and call it separately for each
    target. Here we also append to the target list the languages read from
    `LINGUAS_FILE`.
    """
    #
    #* The argument for overriding _execute(): We must use environment with
    #  builder overrides applied (see BuilderBase.__init__(). Here it comes for
    #  free.
    #* The argument against using 'emitter': The emitter is called too late
    #  by BuilderBase._execute(). If user calls, for example:
    #
    #    env.POUpdate(LINGUAS_FILE = 'LINGUAS')
    #
    #  the builder throws error, because it is called with target=None,
    #  source=None and is trying to "generate" sources or target list first.
    #  If user calls
    #
    #    env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
    #
    #  the env.BuilderWrapper() calls our builder with target=None,
    #  source=['foo', 'baz']. The BuilderBase._execute() then splits execution
    #  and executes iteratively (recursion) self._execute(None, source[i]).
    #  After that it calls emitter (which is quite too late). The emitter is
    #  also called in each iteration, which makes things yet worse.
    def __init__(self, env, **kw):
        # Fill in PO-specific defaults for any options the caller omitted.
        if not 'suffix' in kw:
            kw['suffix'] = '$POSUFFIX'
        if not 'src_suffix' in kw:
            kw['src_suffix'] = '$POTSUFFIX'
        if not 'src_builder' in kw:
            kw['src_builder'] = '_POTUpdateBuilder'
        if not 'single_source' in kw:
            kw['single_source'] = True
        alias = None
        if 'target_alias' in kw:
            alias = kw['target_alias']
            del kw['target_alias']
        if not 'target_factory' in kw:
            # PO targets are precious/noclean and optionally aliased.
            kw['target_factory'] = _POTargetFactory(env, alias=alias).File
        BuilderBase.__init__(self, **kw)
    def _execute(self, env, target, source, *args, **kw):
        """ Execute builder's actions.

        Here we append to `target` the languages read from `$LINGUAS_FILE` and
        apply `SCons.Builder.BuilderBase._execute()` separately to each target.
        The arguments and return value are same as for
        `SCons.Builder.BuilderBase._execute()`.
        """
        import SCons.Util
        import SCons.Node
        linguas_files = None
        if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE']:
            linguas_files = env['LINGUAS_FILE']
            # This prevents endless recursion loop (we'll be invoked once for
            # each target appended here, we must not extend the list again).
            env['LINGUAS_FILE'] = None
            linguas = _read_linguas_from_files(env,linguas_files)
            if SCons.Util.is_List(target):
                target.extend(linguas)
            elif target is not None:
                target = [target] + linguas
            else:
                target = linguas
        if not target:
            # Let SCons.BuilderBase handle this pathological situation.
            return BuilderBase._execute( self, env, target, source, *args, **kw)
        # The rest is ours: build each target separately so a missing PO file
        # does not force all the others to rebuild.
        if not SCons.Util.is_List(target):
            target = [ target ]
        result = []
        for tgt in target:
            r = BuilderBase._execute( self, env, [tgt], source, *args, **kw)
            result.extend(r)
        if linguas_files is not None:
            # Restore the value we nulled out above.
            env['LINGUAS_FILE'] = linguas_files
        return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=[], source=SCons.Environment._null, *args, **kw):
    """ Function for `Translate()` pseudo-builder.

    First updates the POT template from *source*, then updates (or creates)
    the PO *target* files from that template and returns the PO nodes.
    NOTE: the mutable default `target=[]` is safe here because the list is
    only passed through to POUpdate, never mutated in this function.
    """
    pot = env.POTUpdate(None, source, *args, **kw)
    po = env.POUpdate(target, pot, *args, **kw)
    return po
#############################################################################
#############################################################################
class RPaths(object):
    """ Callable object, which returns pathnames relative to SCons current
    working directory.

    It seems like `SCons.Node.FS.Base.get_path()` returns absolute paths
    for nodes that are outside of current working directory (`env.fs.getcwd()`).
    Here, we often have `SConscript`, `POT` and `PO` files within `po/`
    directory and source files (e.g. `*.c`) outside of it. When generating `POT`
    template file, references to source files are written to `POT` template, so
    a translator may later quickly jump to appropriate source file and line from
    its `PO` editor (e.g. `poedit`). Relative paths in `PO` file are usually
    interpreted by `PO` editor as paths relative to the place, where `PO` file
    lives. The absolute paths would make resultant `POT` file nonportable, as
    the references would be correct only on the machine, where `POT` file was
    recently re-created. For such reason, we need a function, which always
    returns relative paths. This is the purpose of `RPaths` callable object.

    The `__call__` method returns paths relative to current working directory,
    but we assume, that *xgettext(1)* is run from the directory, where target
    file is going to be created.

    Note, that this may not work for files distributed over several hosts or
    across different drives on windows. We assume here, that single local
    filesystem holds both source files and target `POT` templates.

    Intended use of `RPaths` - in `xgettext.py`::

      def generate(env):
          from GettextCommon import RPaths
          ...
          sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
          env.Append(
            ...
            XGETTEXTCOM = 'XGETTEXT ... ' + sources,
            ...
            XgettextRPaths = RPaths(env)
          )
    """
    # NOTE: This callable object returns pathnames of dirs/files relative to
    # current working directory. The pathname remains relative also for entries
    # that are outside of current working directory (note, that
    # SCons.Node.FS.File and siblings return absolute path in such case). For
    # simplicity we compute path relative to current working directory, this
    # seems be enough for our purposes (don't need TARGET variable and
    # SCons.Defaults.Variable_Caller stuff).
    def __init__(self, env):
        """ Initialize `RPaths` callable object.

        **Arguments**:

        - *env* - a `SCons.Environment.Environment` object, defines *current
          working dir*.
        """
        self.env = env
    # FIXME: I'm not sure, how it should be implemented (what the *args are in
    # general, what is **kw).
    def __call__(self, nodes, *args, **kw):
        """ Return nodes' paths (strings) relative to current working directory.

        **Arguments**:

        - *nodes* ([`SCons.Node.FS.Base`]) - list of nodes.
        - *args* - currently unused.
        - *kw* - currently unused.

        **Returns**:

        - Tuple of strings, which represent paths relative to current working
          directory (for given environment).
        """
        # os.path.relpath is available only on python >= 2.6. We use our own
        # implementation. It's taken from BareNecessities package:
        # http://jimmyg.org/work/code/barenecessities/index.html
        from posixpath import curdir
        def relpath(path, start=curdir):
            import posixpath
            """Return a relative version of a path"""
            if not path:
                raise ValueError("no path specified")
            start_list = posixpath.abspath(start).split(posixpath.sep)
            path_list = posixpath.abspath(path).split(posixpath.sep)
            # Work out how much of the filepath is shared by start and path.
            i = len(posixpath.commonprefix([start_list, path_list]))
            rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return posixpath.curdir
            return posixpath.join(*rel_list)
        import os
        import SCons.Node.FS
        rpaths = ()
        cwd = self.env.fs.getcwd().get_abspath()
        for node in nodes:
            rpath = None
            if isinstance(node, SCons.Node.FS.Base):
                rpath = relpath(node.get_abspath(), cwd)
            # FIXME: Other types possible here?
            if rpath is not None:
                rpaths += (rpath,)
        return rpaths
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
""" Action function for `POInit` builder. """
nop = lambda target, source, env : 0
if env.has_key('POAUTOINIT'):
autoinit = env['POAUTOINIT']
else:
autoinit = False
# Well, if everything outside works well, this loop should do single
# iteration. Otherwise we are rebuilding all the targets even, if just
# one has changed (but is this out fault?).
for tgt in target:
if not tgt.exists():
if autoinit:
action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
else:
msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
+ 'If you are a translator, you can create it through: \n' \
+ '$MSGINITCOM'
action = SCons.Action.Action(nop, msg)
status = action([tgt], source, env)
if status: return status
return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
""" Detects *xgettext(1)* binary """
if env.has_key('XGETTEXT'):
return env['XGETTEXT']
xgettext = env.Detect('xgettext');
if xgettext:
return xgettext
raise SCons.Errors.StopError(XgettextNotFound,"Could not detect xgettext")
return None
#############################################################################
def _xgettext_exists(env):
    """ Existence check for xgettext (raises StopError when absent). """
    return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if env.has_key('MSGINIT'):
return env['MSGINIT']
msginit = env.Detect('msginit');
if msginit:
return msginit
raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
return None
#############################################################################
def _msginit_exists(env):
    """ Existence check for msginit (raises StopError when absent). """
    return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
""" Detects *msgmerge(1)* program. """
if env.has_key('MSGMERGE'):
return env['MSGMERGE']
msgmerge = env.Detect('msgmerge');
if msgmerge:
return msgmerge
raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
return None
#############################################################################
def _msgmerge_exists(env):
    """ Existence check for msgmerge (raises StopError when absent). """
    return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
if env.has_key('MSGFMT'):
return env['MSGFMT']
msgfmt = env.Detect('msgfmt');
if msgfmt:
return msgfmt
raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
return None
#############################################################################
def _msgfmt_exists(env):
    """ Existence check for msgfmt (raises StopError when absent). """
    return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
    """ Return the sub-tools generated by the top-level `gettext` tool. """
    # *platform* and *env* are accepted for interface compatibility; the
    # sub-tool set is fixed.
    subtools = ('xgettext', 'msginit', 'msgmerge', 'msgfmt')
    return list(subtools)
#############################################################################
| Python |
"""SCons.Tool.sgilink
Tool-specific initialization for the SGI MIPSPro linker on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgilink.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Util
import link
linkers = ['CC', 'cc']
def generate(env):
    """Add Builders and construction variables for MIPSPro to an Environment."""
    link.generate(env)
    # Prefer a detected linker; fall back to 'cc' so $LINK is always set.
    env['LINK'] = env.Detect(linkers) or 'cc'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    # __RPATH is set to $_RPATH in the platform specification if that
    # platform supports it.
    env['RPATHPREFIX'] = '-rpath '
    env['RPATHSUFFIX'] = ''
    env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
    """Return a path to one of the MIPSPro linkers if detected."""
    return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.latex
Tool-specific initialization for LaTeX.
Generates .dvi files from .latex or .ltx files
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/latex.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Defaults
import SCons.Scanner.LaTeX
import SCons.Util
import SCons.Tool
import SCons.Tool.tex
def LaTeXAuxFunction(target = None, source= None, env=None):
    """Run the LaTeX action (with its aux-file rerun logic) and, on
    failure, point the user at the relevant error message in the log."""
    status = SCons.Tool.tex.InternalLaTeXAuxAction( SCons.Tool.tex.LaTeXAction,
                                                    target, source, env )
    if status != 0:
        SCons.Tool.tex.check_file_error_message(env['LATEX'])
    return status
# Wrap the aux function in an Action; the tex tool's strfunction provides
# the command echo shared by all TeX/LaTeX actions.
LaTeXAuxAction = SCons.Action.Action(LaTeXAuxFunction,
                              strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
def generate(env):
    """Add Builders and construction variables for LaTeX to an Environment."""
    env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
    # The DVI builder (and PDF, for the implicit .dvi -> .pdf chain) must
    # exist before LaTeX actions can be attached to it.
    import dvi
    dvi.generate(env)
    import pdf
    pdf.generate(env)
    bld = env['BUILDERS']['DVI']
    bld.add_action('.ltx', LaTeXAuxAction)
    bld.add_action('.latex', LaTeXAuxAction)
    bld.add_emitter('.ltx', SCons.Tool.tex.tex_eps_emitter)
    bld.add_emitter('.latex', SCons.Tool.tex.tex_eps_emitter)
    # Shared TeX construction variables ($LATEX, $LATEXFLAGS, scanners, ...).
    SCons.Tool.tex.generate_common(env)
def exists(env):
    """Return a path to 'latex' if it can be detected."""
    # generate_darwin() adjusts the environment on OS X so that Detect()
    # can find the TeX installation.
    SCons.Tool.tex.generate_darwin(env)
    return env.Detect('latex')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tar.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
# Candidate tar program names, in order of preference.
tars = ['tar', 'gtar']
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
# multi=1 lets several calls contribute sources to the same archive target;
# DirScanner makes directory sources pick up their contents as dependencies.
TarBuilder = SCons.Builder.Builder(action = TarAction,
                                   source_factory = SCons.Node.FS.Entry,
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$TARSUFFIX',
                                   multi = 1)
def generate(env):
    """Add Builders and construction variables for tar to an Environment."""
    # Install the shared TarBuilder unless this environment already
    # carries a 'Tar' builder of its own.
    try:
        env['BUILDERS']['Tar']
    except KeyError:
        env['BUILDERS']['Tar'] = TarBuilder

    # Prefer whichever tar flavour is actually installed; fall back to
    # 'gtar' so $TARCOM still expands to a well-formed command line.
    env['TAR']       = env.Detect(tars) or 'gtar'
    env['TARFLAGS']  = SCons.Util.CLVar('-c')
    env['TARCOM']    = '$TAR $TARFLAGS -f $TARGET $SOURCES'
    env['TARSUFFIX'] = '.tar'
def exists(env):
    """Return a detected tar program name, or None when none is available."""
    found = env.Detect(tars)
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
"""SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    BitKeeper to an Environment."""
    def BitKeeperFactory(env=env):
        """Deprecated factory: returns a Builder whose action runs the
        BitKeeper 'get' command ($BITKEEPERCOM) on the target."""
        # Import lazily so the deprecation machinery is only touched
        # when the factory is actually invoked.
        import SCons.Warnings as W
        W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
        act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
        return SCons.Builder.Builder(action = act, env = env)
    # Expose the factory as env.BitKeeper (removed a stale commented-out
    # setattr() duplicate of this assignment).
    env.BitKeeper = BitKeeperFactory

    env['BITKEEPER']         = 'bk'
    env['BITKEEPERGET']      = '$BITKEEPER get'
    env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
    env['BITKEEPERCOM']      = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
    """Return the detected 'bk' executable, or None when BitKeeper is absent."""
    bk = env.Detect('bk')
    return bk
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.