code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
'''
Created on Dec 22, 2010
@author: Nican
'''
import os.path
class ValueSaver(object):
    """Persist a flat string-to-string mapping as ``key=value`` lines in a file.

    Values live in ``self.Values``; every ``SetValue`` rewrites the whole
    backing file so the on-disk copy is always current.
    """

    def __init__(self, fileName):
        # Path of the backing file; only created on first write.
        self.fileName = fileName
        self.Values = {}

    def readFromFile(self):
        """Reload ``self.Values`` from the backing file (no-op if missing)."""
        self.Values = {}
        if not os.path.isfile(self.fileName):
            return
        with open(self.fileName, 'r') as f:
            for line in f:
                # Lines from file iteration keep their newline, so strip it
                # before testing for blanks (the old ``line == ""`` test could
                # never fire) and before splitting (the old ``split[1][:-1]``
                # ate a real character when the last line had no newline).
                line = line.rstrip('\n')
                if line == "":
                    continue
                split = line.split("=", 1)
                if len(split) == 1:
                    # No '=' separator: not a key/value line, skip it.
                    continue
                key, value = split
                self.Values[key] = value
                print(key + " = " + value)

    def writeToFile(self):
        """Write every key/value pair as a ``key=value`` line."""
        with open(self.fileName, 'w') as f:
            # dict.items() works on Python 2 and 3; iteritems() (the
            # original call) is Python-2 only and crashed under Python 3
            # even though the rest of this file uses print() syntax.
            for key, value in self.Values.items():
                f.write(key + "=" + value + "\n")
            f.flush()

    def SetValue(self, key, value):
        """Store *key* -> *value* and immediately persist the whole map."""
        self.Values[key] = value
        self.writeToFile()

    def GetValue(self, key, default=None):
        """Return the stored value for *key*, or *default* if absent."""
        return self.Values.get(key, default)
if __name__ == '__main__':
    # Round-trip self-test: write two values, re-read them, verify.
    # Fixed: the original instantiated misspelled "ValueSafer", which
    # raised NameError before the test could run at all.
    test = ValueSaver('test.txt')
    test.SetValue("a", "123")
    test.SetValue("b", "456")
    test.writeToFile()
    test.readFromFile()
    if test.GetValue("a") != "123" or test.GetValue("b") != "456":
        print("Test for value safer failed!")
| Python |
'''
Created on Dec 4, 2010
@author: Nican
'''
import wx
class StartMenu(wx.BoxSizer):
    """Horizontal sizer holding a single START/STOP toggle button.

    Pressing the button starts the octorotor through *parent* and flips
    the label to STOP; pressing again stops it and restores the label.
    """

    def __init__(self, parent):
        """Build the sizer, add the toggle button and bind its handler."""
        wx.BoxSizer.__init__(self, wx.HORIZONTAL)
        self.parent = parent
        self.startButton = wx.Button(parent, -1, "START")
        self.Add(self.startButton, 1, wx.EXPAND)
        self.startButton.Bind(wx.EVT_BUTTON, self.OnStart)

    def OnStart(self, e):
        """Toggle between starting and stopping the octorotor."""
        if self.startButton.GetLabel() == "START":
            self.parent.startOctorotor()
            # Change the label text
            self.startButton.SetLabel("STOP")
        else:
            # Fixed typo: was self.parent.stopOctoorotor(), which would
            # raise AttributeError on every stop attempt (the start path
            # calls startOctorotor, so the stop counterpart should match).
            self.parent.stopOctorotor()
            # Change the label text
            self.startButton.SetLabel("START")
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools fetched when the caller does not pin one.
DEFAULT_VERSION = "0.6c9"
# Per-Python-minor-version download directory on PyPI; sys.version[:3]
# yields e.g. "2.6" on the interpreters this bootstrap targets.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 checksums keyed by egg filename; consulted by
# _validate_md5() after download.  Regenerate with "--md5update".
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Verify downloaded egg bytes against the md5_data table.

    Eggs not listed in the table pass through unchecked.  On a checksum
    mismatch the whole script aborts with exit status 2.  Returns *data*
    unchanged on success.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            # Python 2 "print >>stream" syntax: write message to stderr.
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Remember whether packaging machinery was already imported: if so we
    # cannot safely swap it out in-process.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules

    def do_download():
        # Fetch the egg, put it first on sys.path, and tell setuptools to
        # bootstrap-install itself from that egg.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg

    try:
        import pkg_resources
    except ImportError:
        # No packaging machinery at all -> plain download.
        return do_download()
    try:
        pkg_resources.require("setuptools>=" + version); return
    except pkg_resources.VersionConflict, e:  # Python 2 "except X, e" syntax
        # An older setuptools is installed.
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Not imported yet: drop the stale module and fetch a fresh egg.
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay=15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version, sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a chance to Ctrl-C before hitting the
                # network; banner text is deliberately flush-left.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto, "wb"); dst.write(data)
        finally:
            # Close both ends even if download or validation failed.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # setuptools missing entirely: download an egg, run easy_install
        # out of it, then remove the temporary egg file.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0, egg)
            from setuptools.command.easy_install import main
            return main(list(argv) + [egg])  # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Placeholder release shipped by some distros; it cannot be
            # upgraded in place, so bail out and ask the user to remove it.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>=" + version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        # Upgrade the installed setuptools with a freshly-downloaded egg.
        main(list(argv) + [download_setuptools(delay=0)])
        sys.exit(0)  # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            # Nothing to do: report state and how to force a reinstall.
            print "Setuptools version", version, "or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Compute fresh digests for each named egg file.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name, 'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Re-render the whole table, sorted, as Python source text.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    # Locate this script's own source file and splice the regenerated
    # table into the "md5_data = {...}" literal in place.
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile, 'w')
    f.write(src)
    f.close()
if __name__ == '__main__':
    # Dispatch: "--md5update file..." refreshes the checksum table;
    # any other invocation installs/upgrades setuptools.
    argv = sys.argv
    if len(argv) > 2 and argv[1] == '--md5update':
        update_md5(argv[2:])
    else:
        main(argv[1:])
| Python |
#!/usr/bin/env python
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import os
import os.path
import sys
# Build/packaging configuration for the pydicom distribution.
setup(name="pydicom",
      packages = find_packages(),
      include_package_data = True,
      version="1.0a",
      # Ship the bundled DICOM test files inside the installed package.
      package_data = {'dicom': ['testfiles/*.dcm']},
      zip_safe = False,  # want users to be able to see included examples,tests
      description="Pure python package for DICOM medical file reading and writing",
      author="Darcy Mason",
      author_email="darcymason@gmail.com",
      url="http://pydicom.googlecode.com",
      license = "MIT license",
      keywords = "dicom python medical imaging",
      classifiers = [
          "License :: OSI Approved :: MIT License",
          "Intended Audience :: Developers",
          "Intended Audience :: Healthcare Industry",
          "Intended Audience :: Science/Research",
          "Development Status :: 4 - Beta",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Operating System :: OS Independent",
          "Topic :: Scientific/Engineering :: Medical Science Apps.",
          "Topic :: Scientific/Engineering :: Physics",
          "Topic :: Software Development :: Libraries",
      ],
      long_description = """
pydicom is a pure python package for parsing DICOM files.
DICOM is a standard (http://medical.nema.org) for communicating
medical images and related information such as reports
and radiotherapy objects.
pydicom makes it easy to read these complex files into natural
pythonic structures for easy manipulation.
Modified datasets can be written again to DICOM format files.
See the `Getting Started <http://code.google.com/p/pydicom/wiki/GettingStarted>`_
wiki page for installation and basic information, and the
`Pydicom User Guide <http://code.google.com/p/pydicom/wiki/PydicomUserGuide>`_ page
for an overview of how to use the pydicom library.
""",
      # Custom loader/suite hooked up for "setup.py test".
      test_loader = "dicom.test.run_tests:MyTestLoader",
      test_suite = "dummy_string"
      )
| Python |
#!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
    import subprocess

    def _python_cmd(*args):
        # Run the current interpreter with *args*; True iff exit status 0.
        args = (sys.executable,) + args
        return subprocess.call(args) == 0

except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        # Fallback for interpreters without the subprocess module.
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Distribute version fetched by default, and where to fetch it from.
DEFAULT_VERSION = "0.6.10"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake "setuptools" metadata written to shadow
# a real setuptools installation.
SETUPTOOLS_FAKED_VERSION = "0.6c11"
# PKG-INFO contents for that fake egg-info (placeholder fields on purpose).
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Unpack the distribute *tarball* into a temp dir and run
    'setup.py install' from inside it."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        # Always restore the original working directory.
        os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* in a temp dir and build a distribute egg into
    *to_dir*; raises IOError if the expected *egg* file did not appear."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a distribute egg for this interpreter exists in *to_dir*
    (building it from the downloaded tarball if needed), then import
    setuptools from that egg and mark it for bootstrap installation."""
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Make distribute >= *version* importable, downloading it if needed.

    When *no_fake* is False, an existing plain-setuptools installation is
    shadowed with fake metadata so distribute can take its place.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            if not hasattr(pkg_resources, '_distribute'):
                # pkg_resources comes from plain setuptools, not distribute.
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>=" + version)
            return
        except pkg_resources.VersionConflict:
            # Python 2/3-compatible way to get the active exception.
            e = sys.exc_info()[1]
            if was_imported:
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                # Not imported yet: drop it and fetch a fresh egg.
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename
    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    NOTE(review): *delay* is accepted but never used in this body — no
    pause actually happens, despite the docstring.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 moved urlopen into urllib.request.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            # Close both handles even on failure.
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _patch_file(path, content):
    """Back the file at *path* up, then overwrite it with *content*.

    Returns False without touching anything when the file already holds
    *content*; returns True after patching.
    """
    if open(path).read() == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)
    patched = open(path, 'w')
    try:
        patched.write(content)
    finally:
        patched.close()
    return True
def _same_content(path, content):
return open(path).read() == content
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
@_no_sandbox
def _rename_path(path):
    """Move *path* aside to a timestamped '<path>.OLD.<time>' name and
    return the new name (used to back files up before patching)."""
    new_name = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, new_name)
    os.rename(path, new_name)
    return new_name
def _remove_flat_installation(placeholder):
    """Neutralize a non-egg ("flat") setuptools install located directly
    in *placeholder*: patch its egg-info with fake metadata and rename
    its files out of the way.  Returns True when elements were moved,
    False when nothing needed doing, None when no egg-info was found."""
    if not os.path.isdir(placeholder):
        # NOTE(review): "Unkown" typo in the original log message.
        log.warn('Unkown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):  # NOTE: shadows the builtin "file"
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return
    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
def _after_install(dist):
    """Post-install hook: write fake setuptools metadata into the
    pure-lib install directory of *dist*."""
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
@_no_sandbox
def _create_fake_setuptools_pkg_info(placeholder):
    """Drop a fake setuptools egg-info file plus a setuptools.pth entry
    into *placeholder* so pkg_resources believes setuptools is present."""
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        # Already faked on an earlier run.
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a minimal fake one that
    contains only the faked PKG-INFO.  Returns False if already patched,
    True after patching."""
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True
def _before_install():
    """Pre-install hook: shadow any existing setuptools installation."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
elif option == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
    """Locate an installed plain setuptools and shadow it with fake
    metadata so distribute can be installed in its place; on success the
    whole command is relaunched to pick up the new metadata."""
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        # distribute's extended API accepts replacement=False.
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --preix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    # Re-exec the whole command so the fake metadata is picked up.
    _relaunch()
def _relaunch():
    """Re-exec the current command line and exit with its status, so the
    freshly patched package metadata is seen by a clean interpreter."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    (Backport of TarFile.extractall for old Pythons; *self* is the open
    TarFile instance.)
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # list.sort() only grew key=/reverse= in Python 2.4; use a cmp
        # function plus explicit reverse on older interpreters.
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # Download the distribute tarball and install it.  *argv* and
    # *version* are accepted for interface compatibility but are not
    # used by this body (download_setuptools is called with defaults).
    tarball = download_setuptools()
    _install(tarball)


if __name__ == '__main__':
    main(sys.argv[1:])
| Python |
# csv2dict2011.py
# -*- coding: utf-8 -*-
"""Reformat a dicom dictionary csv file (from e.g. standards docs) to Python syntax
Write the DICOM dictionary elements as:
tag: (VR, VM, description, keyword, is_retired)
in python format
Also write the repeating groups or elements (e.g. group "50xx")
as masks that can be tested later for tag lookups that didn't work
"""
#
# Copyright 2011-2012, Darcy Mason
# This file is part of pydicom, released under an MIT licence.
# See license.txt file for more details.
# Input CSV (exported from the 2011 DICOM standard) and generated output.
csv_filename = "dict_2011.csv"
pydict_filename = "_dicom_dict.py"
# Names of the two dict literals written into the generated module.
main_dict_name = "DicomDictionary"
mask_dict_name = "RepeatersDictionary"
def write_dict(f, dict_name, attributes, tagIsString):
    """Write *attributes* to *f* as the source of a dict named *dict_name*.

    Each attribute is a 6-tuple (tag, VR, VM, description, is_retired,
    keyword); *tagIsString* controls whether the tag key is quoted.
    """
    tag_part = "'%s'" if tagIsString else "%s"
    entry_format = tag_part + """: ('%s', '%s', "%s", '%s', '%s')"""
    entries = (entry_format % attribute for attribute in attributes)
    f.write("\n%s = {\n" % dict_name)
    f.write(",\n".join(entries))
    f.write("}\n")
if __name__ == "__main__":
    import csv  # comma-separated value module
    # Python 2: file() is the builtin open; csv wants binary mode here.
    csv_reader = csv.reader(file(csv_filename, 'rb'))
    main_attributes = []   # normal tags -> DicomDictionary
    mask_attributes = []   # repeating-group ("x") tags -> RepeatersDictionary
    for row in csv_reader:
        tag, description, keyword, VR, VM, is_retired = row
        # Skip blank rows and the CSV header row.
        if tag == '' or tag == "Tag":
            continue
        tag = tag.strip()  # at least one item has extra blank on end
        VR = VR.strip()  # similarly, some VRs have extra blank
        keyword = keyword.strip()  # just in case
        # Tags look like "(gggg,eeee)": drop the parens and split.
        group, elem = tag[1:-1].split(",")
        if is_retired.strip() == 'RET':
            is_retired = 'Retired'
        if VR == "see note":  # used with some delimiter tags
            VR = "NONE"  # to be same as '08 dict in pydicom
        # Handle one case "(0020,3100 to 31FF)" by converting to mask
        # Do in general way in case others like this come in future standards
        if " to " in elem:
            from_elem, to_elem = elem.split(" to ")
            if from_elem.endswith("00") and to_elem.endswith("FF"):
                elem = from_elem[:2] + "xx"
            else:
                raise NotImplementedError, "Cannot mask '%s'" % elem
        if description.endswith(" "):
            description = description.rstrip()
        description = description.replace("’", "'")  # non-ascii apostrophe
        description = description.replace("‑", "-")  # non-ascii dash used, shows in utf-8 as this a character
        description = description.replace("µ", "u")  # replace micro symbol
        # If blank (e.g. (0018,9445) and (0028,0020)), then add dummy vals
        if VR == '' and VM == '' and is_retired:
            VR = 'OB'
            VM = '1'
            description = "Retired-blank"
        # One odd tag in '11 standard (0028,3006)
        # NOTE(review): comparing and re-assigning the identical string is a
        # no-op as written; per the "extra space" comment the original
        # presumably compared against a value containing an extra space —
        # verify against the source CSV.
        if VR == 'US or OW':  # extra space
            VR = 'US or OW'
        # Handle retired "repeating group" tags e.g. group "50xx"
        if "x" in group or "x" in elem:
            tag = group + elem  # simple concatenation
            mask_attributes.append((tag, VR, VM, description, is_retired, keyword))
        else:
            tag = "0x%s%s" % (group, elem)
            main_attributes.append((tag, VR, VM, description, is_retired, keyword))
    # Emit the generated module: header comment, docstring, both dicts.
    py_file = file(pydict_filename, "w")
    py_file.write("# %s\n" % pydict_filename)
    py_file.write('"""DICOM data dictionary auto-generated by %s"""\n' % __file__)
    write_dict(py_file, main_dict_name, main_attributes, tagIsString=False)
    write_dict(py_file, mask_dict_name, mask_attributes, tagIsString=True)
    py_file.close()
    # Python 2 print statements.
    print "Finished creating python file %s containing the dicom dictionary" % pydict_filename
    print "Wrote %d tags" % (len(main_attributes)+len(mask_attributes))
| Python |
# make_UID_dict.py
"""Reformat a UID list csv file (Table A-1 PS3.6-2008) to Python syntax
Write the dict elements as:
UID: (name, type, name_info, is_retired)
in python format
name_info is extra information extracted from very long names, e.g.
which bit size a particular transfer syntax is default for
is_retired is 'Retired' if true, else is ''
"""
#
# Copyright 2008-2012, Darcy Mason
# This file is part of pydicom.
# See the license.txt file for license information.
# Input CSV (UID table from PS3.6-2008), generated output module, and the
# name of the dict literal written into it.
csv_filename = "UID_dictionary.csv"
pydict_filename = "_UID_dict.py"
dict_name = "UID_dictionary"
def write_dict(f, dict_name, attributes):
    """Write *attributes* (5-tuples: UID, name, type, name_info,
    is_retired) to *f* as the source of a dict named *dict_name*."""
    entry_format = """'%s': ('%s', '%s', '%s', '%s')"""
    body = ",\n".join(entry_format % attribute for attribute in attributes)
    f.write("\n%s = {\n%s}\n" % (dict_name, body))
if __name__ == "__main__":
    import csv  # comma-separated value module
    # Python 2: file() is the builtin open; csv wants binary mode here.
    csv_reader = csv.reader(file(csv_filename, 'rb'))
    attributes = []
    for row in csv_reader:
        UID, name, UIDtype, reference = row
        name_info = ""
        is_retired = ""
        name = name.replace("\x96", "-")  # non-ascii character
        # A "(Retired)" suffix (with its leading space) becomes a flag.
        if name.endswith("(Retired)"):
            name = name[:-10]
            is_retired = "Retired"
        # Long names like "Foo: details" split into name + name_info.
        if ":" in name:
            name, name_info = name.split(":")
        if "&" in name:
            name = name.replace("&", "and")
        name_info = name_info.strip()  # clear leading (and trailing, if any) whitespace
        attributes.append((UID, name, UIDtype, name_info, is_retired))  # leave Part reference out
    # Emit the generated module: header comment, docstring, the dict.
    py_file = file(pydict_filename, "wb")
    py_file.write("# %s\n" % pydict_filename)
    py_file.write('"""\n%s\n"""\n' % "Dictionary of UID: (name, type, name_info, is_retired)\n")
    py_file.write('# Auto-generated by %s"""\n' % __file__)
    write_dict(py_file, dict_name, attributes)
    py_file.close()
    # Python 2 print statements.
    print "Finished creating python file %s containing the UID dictionary" % pydict_filename
    print "Wrote %d elements" % len(attributes)
| Python |
# csv2dict.py
"""Reformat a dicom dictionary csv file (from e.g. standards docs) to Python syntax
Write the DICOM dictionary elements as:
tag: (VR, VM, description, is_retired)
in python format
Also write the repeating groups or elements (e.g. group "50xx")
as masks that can be tested later for tag lookups that didn't work
"""
#
# Copyright 2008-2012, Darcy Mason
# This file is part of pydicom.
# See the license.txt file
# Input CSV (exported from the 2008 DICOM standard) and generated output.
csv_filename = "DICOM_dictionary_2008.csv"
pydict_filename = "_dicom_dict.py"
# Names of the two dict literals written into the generated module.
main_dict_name = "DicomDictionary"
mask_dict_name = "RepeatersDictionary"
def write_dict(f, dict_name, attributes, tagIsString):
    """Emit *attributes* (5-tuples: tag, VR, VM, description, is_retired)
    as the source text of a dict named *dict_name* into *f*.

    *tagIsString* selects whether the tag key is quoted (repeater masks)
    or written bare (hex integer literals).
    """
    key_fmt = "'%s'" if tagIsString else "%s"
    fmt = key_fmt + """: ('%s', '%s', "%s", '%s')"""
    f.write("\n%s = {\n" % dict_name)
    f.write(",\n".join(fmt % attr for attr in attributes))
    f.write("}\n")
if __name__ == "__main__":
    import csv  # comma-separated value module
    # Python 2: file() is the builtin open; csv wants binary mode here.
    csv_reader = csv.reader(file(csv_filename, 'rb'))
    main_attributes = []   # normal tags -> DicomDictionary
    mask_attributes = []   # repeating-group ("x") tags -> RepeatersDictionary
    for row in csv_reader:
        tag, description, VR, VM, is_retired = row
        tag = tag.strip()  # at least one item has extra blank on end
        # Tags look like "(gggg,eeee)": drop the parens and split.
        group, elem = tag[1:-1].split(",")
        # Handle one case "(0020,3100 to 31FF)" by converting to mask
        # Do in general way in case others like this come in future standards
        if " to " in elem:
            from_elem, to_elem = elem.split(" to ")
            if from_elem.endswith("00") and to_elem.endswith("FF"):
                elem = from_elem[:2] + "xx"
            else:
                raise NotImplementedError, "Cannot mask '%s'" % elem
        description = description.replace("\x92", "'")  # non-ascii apostrophe used
        description = description.replace("\x96", "-")  # non-ascii dash used
        # If blank (e.g. (0018,9445) and (0028,0020)), then add dummy vals
        if VR == '' and VM == '' and is_retired:
            VR = 'OB'
            VM = '1'
            description = "Retired-blank"
        # Handle retired "repeating group" tags e.g. group "50xx"
        if "x" in group or "x" in elem:
            tag = group + elem  # simple concatenation
            mask_attributes.append((tag, VR, VM, description, is_retired))
        else:
            tag = "0x%s%s" % (group, elem)
            main_attributes.append((tag, VR, VM, description, is_retired))
    # Emit the generated module: header comment, docstring, both dicts.
    py_file = file(pydict_filename, "wb")
    py_file.write("# %s\n" % pydict_filename)
    py_file.write('"""DICOM data dictionary auto-generated by %s"""\n' % __file__)
    write_dict(py_file, main_dict_name, main_attributes, tagIsString=False)
    write_dict(py_file, mask_dict_name, mask_attributes, tagIsString=True)
    py_file.close()
    # Python 2 print statements.
    print "Finished creating python file %s containing the dicom dictionary" % pydict_filename
    print "Wrote %d tags" % (len(main_attributes)+len(mask_attributes))
| Python |
from __future__ import with_statement
# file "make_private_dict.py"
# Copyright (c) 2009 Daniel Nanz
# This file is released under the pydicom (http://code.google.com/p/pydicom/)
# license.
# See the file license.txt included with the pydicom distribution, also
# available at http://pydicom.googlecode.com
'''
-- Usage ------------------ (>= python 2.5, <3)---
python make_private_dict_alt.py
or
python make_private_dict_alt.py target_file_path
--------------------------------------------------
This script reads the DICOM private tag information as
maintained by the GDCM project (http://sourceforge.net/projects/gdcm/)
from their website and prints it either to sys.stdout
(if target_file_path == None) or to the file identified by a input
target_file_path.
The output is structured such, that for target_file_path = "_private_dict.py"
the output file can replace the current _private_dict.py file of the pydicom
source, which should allow straightforward testing.
'''
import urllib2
import io
import xml.etree.cElementTree as ET
import sys
import datetime
import os
import pprint
GDCM_URL = ''.join(('http://gdcm.svn.sf.net/viewvc/gdcm/trunk',
'/Source/DataDictionary/privatedicts.xml'))
UNKNOWN_NAME = 'Unknown'
PRIVATE_DICT_NAME = 'private_dictionaries'
def get_private_dict_from_GDCM(url=GDCM_URL, retired_field=''):
    '''open GDCM_URL, read content into BytesIO file-like object and parse
    into an ElementTree instance

    Returns {owner: {tag_string: (vr, vm, name, retired_field)}} where
    tag_string is group+element concatenated.

    NOTE(review): the 'url' parameter is accepted but ignored -- the
    module-level GDCM_URL is always fetched; confirm before relying on it.
    '''
    etree = ET.parse(io.BytesIO(urllib2.urlopen(GDCM_URL).read()))
    p_dict = etree.getroot()
    entries = [entry for entry in p_dict.findall('entry')]
    private_dict = dict()
    for e in entries:
        # Copy the XML attributes (group, element, vr, vm, name, owner) into a dict
        d = dict()
        for item in e.items():
            d[item[0]] = item[1]
        tag_string = ''.join((d['group'], d['element']))
        if d['name'] == '?':
            d['name'] = UNKNOWN_NAME  # normalize GDCM's '?' placeholder
        dict_entry = (d['vr'], d['vm'], d['name'], retired_field)
        owner = d['owner']
        # Create the per-owner (private creator) sub-dict on first sight
        if owner in private_dict.keys():
            pass
        else:
            private_dict[owner] = dict()
        curr_dict = private_dict[owner]
        curr_dict[tag_string] = dict_entry
    return private_dict
def get_introductory_text(filename, datestring):
    """Build the autogeneration header comments plus the
    '<PRIVATE_DICT_NAME> = \\' line that precedes the dict literal.
    """
    header_lines = [
        '# ' + filename,
        '# This file is autogenerated by "make_private_dict.py",',
        '# from private elements list maintained by the GDCM project',
        '# (' + GDCM_URL + ').',
        '# Downloaded on ' + datestring + '.',
        '# See the pydicom license.txt file for license information on pydicom, and GDCM.',
        '',
        '# This is a dictionary of DICOM dictionaries.',
        '# The outer dictionary key is the Private Creator name ("owner"),',
        '# the inner dictionary is a map of DICOM tag to ',
        '# (VR, type, name, is_retired)',
        '',
        PRIVATE_DICT_NAME + ' = \\\n',
    ]
    return '\n'.join(header_lines)
def main():
    '''Get private dict from GDCM project. Write to sys.stdout or to output
    file given as pathname and as the first argument to the script.
    '''
    private_dict = get_private_dict_from_GDCM()
    # Optional first CLI argument is the output file path
    try:
        file_path = sys.argv[1]
    except IndexError:
        file_path = None
    if file_path != None:
        # NOTE(review): mode 'wb' with str writes works on Python 2 only
        with open(file_path, 'wb') as fd:
            filename = os.path.basename(file_path)
            datestring = datetime.date.isoformat(datetime.date.today())
            int_text = get_introductory_text(filename, datestring)
            fd.write(int_text)  # header comments + assignment line
            # pprint produces an eval-able dict literal for the module body
            pprint.pprint(private_dict, fd)
    else:
        pprint.pprint(private_dict)
if __name__ == '__main__':
    # Script entry: fetch the GDCM private dictionary and print/write it
    main()
| Python |
# values.py
"""Functions for converting values of DICOM data elements to proper python types
"""
# Copyright (c) 2010-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import unpack, calcsize, pack
import logging
logger = logging.getLogger('pydicom')
from dicom.valuerep import PersonName, MultiString
from dicom.multival import MultiValue
import dicom.UID
from dicom.tag import Tag, TupleTag, SequenceDelimiterTag
from dicom.datadict import dictionaryVR
from dicom.filereader import read_sequence
from io import BytesIO
from dicom.valuerep import DS, IS
from dicom.charset import default_encoding
from dicom import in_py3
def convert_tag(byte_string, is_little_endian, offset=0):
    """Unpack one DICOM tag (group, element) from 4 bytes at *offset*."""
    fmt = "<HH" if is_little_endian else ">HH"
    group_elem = unpack(fmt, byte_string[offset:offset + 4])
    return TupleTag(group_elem)
def convert_ATvalue(byte_string, is_little_endian, struct_format=None):
    """Read and return AT (Attribute Tag) data_element value(s).

    A single tag is 4 bytes; a multi-valued AT element is a concatenation
    of 4-byte tags and is returned as a MultiValue of Tag.
    """
    length = len(byte_string)
    if length == 4:
        return convert_tag(byte_string, is_little_endian)
    # length > 4: multiple tags, 4 bytes each
    if length % 4 != 0:
        # Bug fix: the original message referenced 'fp', which is not defined
        # in this function and raised NameError instead of warning.
        logger.warn("Expected length to be multiple of 4 for VR 'AT', "
                    "got length %d", length)
    return MultiValue(Tag, [convert_tag(byte_string, is_little_endian, offset=x)
                            for x in range(0, length, 4)])
def convert_DS_string(byte_string, is_little_endian, struct_format=None):
    """Read and return one DS (Decimal String) value, or a list of them."""
    return MultiString(byte_string, DS)
def convert_IS_string(byte_string, is_little_endian, struct_format=None):
    """Read and return one IS (Integer String) value, or a list of them."""
    return MultiString(byte_string, IS)
def convert_numbers(byte_string, is_little_endian, struct_format):
    """Decode binary number(s) of struct type *struct_format* from bytes.

    Returns a single value when exactly one number is present, otherwise a
    (mutable) list of values.
    """
    endian = '<' if is_little_endian else '>'
    # "=" forces 'standard' struct sizes, needed on 64-bit systems
    value_size = calcsize("=" + struct_format)
    total = len(byte_string)
    if total % value_size != 0:
        logger.warn("Expected length to be even multiple of number size")
    count = total // value_size
    values = unpack("%c%u%c" % (endian, count, struct_format), byte_string)
    if len(values) == 1:
        return values[0]
    # list, not tuple, so callers can modify in place
    return list(values)
def convert_OBvalue(byte_string, is_little_endian, struct_format=None):
    """Return OB (Other Byte) data unchanged, as raw bytes."""
    return byte_string
def convert_OWvalue(byte_string, is_little_endian, struct_format=None):
    """Return the raw bytes of an OW (Other Word) value.

    Note: pydicom does NOT byte-swap here; swapping happens only in
    dataset.pixel_array.
    """
    # Behaviorally identical to convert_OBvalue: hand the buffer back untouched
    return byte_string
def convert_PN(byte_string, is_little_endian, struct_format=None):
    """Read and return one PersonName instance, or a list of them."""
    return MultiString(byte_string, PersonName)
def convert_string(byte_string, is_little_endian, struct_format=None):
    """Read and return one plain string, or a list of strings."""
    # default valtype (str) is used
    return MultiString(byte_string)
def convert_single_string(byte_string, is_little_endian, struct_format=None):
    """Read and return a single string (backslash character does not split).

    Strips the single trailing space used to pad to even length, and under
    Python 3 decodes the bytes to str using the default encoding.
    """
    if byte_string and byte_string.endswith(b' '):
        byte_string = byte_string[:-1]
    if in_py3:
        # Bug fix: the original decoded a misspelled name ('bytestring'),
        # which raised NameError / discarded the decode; decode the real
        # variable and return the decoded string.
        byte_string = byte_string.decode(default_encoding)
    return byte_string
def convert_SQ(byte_string, is_implicit_VR, is_little_endian, offset=0):
    """Parse a sequence whose bytes were deferred at initial read time."""
    return read_sequence(BytesIO(byte_string), is_implicit_VR,
                         is_little_endian, len(byte_string), offset)
def convert_UI(byte_string, is_little_endian, struct_format=None):
    """Read and return one UID, or a list of UIDs."""
    # UI values are padded to even length with a NUL byte -- strip it
    if byte_string and byte_string.endswith(b'\0'):
        byte_string = byte_string[:-1]
    return MultiString(byte_string, dicom.UID.UID)
def convert_UN(byte_string, is_little_endian, struct_format=None):
    """Return the raw bytes unchanged for VR 'UN' (unknown)."""
    return byte_string
def convert_value(VR, raw_data_element):
    """Return the converted value (from raw bytes) for the given VR.

    Dispatches through the module-level `converters` map; raises
    NotImplementedError for a VR with no registered converter.
    """
    # NOTE(review): 'tag' is never used below -- possibly retained only for
    # Tag()'s validation side effect; confirm before removing.
    tag = Tag(raw_data_element.tag)
    if VR not in converters:
        raise NotImplementedError("Unknown Value Representation '{0}'".format(VR))
    # Look up the function to convert that VR
    # Dispatch two cases: a plain converter, or a number one which needs a format string
    if isinstance(converters[VR], tuple):
        converter, num_format = converters[VR]
    else:
        converter = converters[VR]
        num_format = None
    byte_string = raw_data_element.value
    is_little_endian = raw_data_element.is_little_endian
    is_implicit_VR = raw_data_element.is_implicit_VR
    # Not only two cases. Also need extra info if is a raw sequence:
    # SQ needs the implicit-VR flag and the file offset of the value
    if VR != "SQ":
        value = converter(byte_string, is_little_endian, num_format)
    else:
        value = convert_SQ(byte_string, is_implicit_VR, is_little_endian, raw_data_element.value_tell)
    return value
# converters map a VR to the function to read the value(s).
# for convert_numbers, the converter maps to a tuple (function, struct_format)
# (struct_format in python struct module style)
converters = {'UL': (convert_numbers, 'L'),
              'SL': (convert_numbers, 'l'),
              'US': (convert_numbers, 'H'),
              'SS': (convert_numbers, 'h'),
              'FL': (convert_numbers, 'f'),
              'FD': (convert_numbers, 'd'),
              'OF': (convert_numbers, 'f'),
              'OB': convert_OBvalue,
              'UI': convert_UI,
              'SH': convert_string,
              'DA': convert_string,
              'TM': convert_string,
              'CS': convert_string,
              'PN': convert_PN,
              'LO': convert_string,
              'IS': convert_IS_string,
              'DS': convert_DS_string,
              'AE': convert_string,
              'AS': convert_string,
              'LT': convert_single_string,
              'SQ': convert_SQ,
              'UN': convert_UN,
              'AT': convert_ATvalue,
              'ST': convert_string,
              'OW': convert_OWvalue,
              # composite VR strings appear in some dictionary entries;
              # note OW/OB depends on other items, which we don't know at read time
              'OW/OB': convert_OBvalue,
              'OB/OW': convert_OBvalue,
              'OW or OB': convert_OBvalue,
              'OB or OW': convert_OBvalue,
              'US or SS': convert_OWvalue,
              'US or SS or OW': convert_OWvalue,
              'US\\US or SS\\US': convert_OWvalue,
              'DT': convert_string,
              'UT': convert_single_string,
              }
if __name__ == "__main__":
    # module is a library; nothing to run directly
    pass
| Python |
# valuerep.py
"""Special classes for DICOM value representations (VR)"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from decimal import Decimal
import dicom.config
from dicom.multival import MultiValue
from dicom import in_py3
default_encoding = "iso8859" # can't import from charset or get circular import
# For reading/writing data elements, these ones have longer explicit VR format
extra_length_VRs = ('OB', 'OW', 'OF', 'SQ', 'UN', 'UT')
# VRs that can be affected by character repertoire in (0008,0005) Specific Character Set
# See PS-3.5 (2011), section 6.1.2 Graphic Characters
text_VRs = ('SH', 'LO', 'ST', 'LT', 'UT') # and PN, but it is handled separately.
class DS(Decimal):
    """Store values for DICOM VR of DS (Decimal String).

    Derives from Decimal so arithmetic is exact.
    Note: if constructed by an empty string, returns the empty string,
    not an instance of this class.
    """
    def __new__(cls, val):
        """Create an instance of DS object, or return a blank string if one is
        passed in, e.g. from a type 2 DICOM blank value.

        Raises TypeError for float input (unless config.allow_DS_float) and
        OverflowError when the representation exceeds DICOM's 16-character
        limit (only if config.enforce_valid_values).
        """
        # DICOM allows spaces around the string, but python doesn't, so clean it
        if isinstance(val, (str, unicode)):  # 'unicode': Python 2 only
            val = val.strip()
        if val == '':
            return val  # blank value: hand back the plain empty string
        if isinstance(val, float) and not dicom.config.allow_DS_float:
            # binary floats are ambiguous as decimal strings; make caller decide
            msg = ("DS cannot be instantiated with a float value, unless "
                   "config.allow_DS_float is set to True. It is recommended to "
                   "convert to a string instead, with the desired number of digits, "
                   "or use Decimal.quantize and pass a Decimal instance.")
            raise TypeError(msg)
        # NOTE(review): a plain Decimal passed in is returned unchanged
        # (not wrapped as DS) -- confirm this is intended.
        if not isinstance(val, Decimal):
            val = super(DS, cls).__new__(cls, val)
        if len(str(val)) > 16 and dicom.config.enforce_valid_values:
            msg = ("DS value representation must be <= 16 characters by DICOM "
                   "standard. Initialize with a smaller string, or set config.enforce_valid_values "
                   "to False to override, "
                   "or use Decimal.quantize() and initialize with a Decimal instance.")
            raise OverflowError(msg)
        return val

    def __init__(self, val):
        """Store the original string if one given, for exact write-out of same
        value later. E.g. if set '1.23e2', Decimal would write '123', but DS
        will use the original
        """
        # ... also if user changes a data element value, then will get
        # a different Decimal, as Decimal is immutable.
        if isinstance(val, (str, unicode)):
            self.original_string = val

    def __repr__(self):
        # Prefer the exact string supplied at construction, when available
        if hasattr(self, 'original_string'):
            return "'" + self.original_string + "'"
        else:
            return "'" + super(DS, self).__str__() + "'"
class IS(int):
    """Derived class of int. Stores original integer string for exact rewriting
    of the string originally read or stored.
    """
    # Unlikely that str(int) will not be the same as the original, but could happen
    # with leading zeros.
    def __new__(cls, val):
        """Create instance if new integer string.

        Returns '' for a blank string; raises TypeError on lossy float /
        Decimal input and OverflowError for values outside signed 32 bits
        (only if config.enforce_valid_values).
        """
        if isinstance(val, (str, unicode)) and val.strip() == '':
            return ''  # blank (type 2) value: plain empty string, not IS
        newval = super(IS, cls).__new__(cls, val)
        # check if a float or Decimal passed in, then could have lost info,
        # and will raise error. E.g. IS(Decimal('1')) is ok, but not IS(1.23)
        if isinstance(val, (float, Decimal)) and newval != val:
            raise TypeError("Could not convert value to integer without loss")
        # Checks in case underlying int is >32 bits, DICOM does not allow this
        if (newval < -2**31 or newval >= 2**31) and dicom.config.enforce_valid_values:
            message = "Value exceeds DICOM limits of -2**31 to (2**31 - 1) for IS"
            raise OverflowError(message)
        return newval

    def __init__(self, val):
        # If a string passed, then store it for exact round-tripping
        if isinstance(val, (str, unicode)):
            self.original_string = val

    def __repr__(self):
        # Prefer the exact original string (e.g. preserves leading zeros)
        if hasattr(self, 'original_string'):
            return "'" + self.original_string + "'"
        else:
            return "'" + int.__str__(self) + "'"
def MultiString(val, valtype=str):
    """Split a DICOM bytestring on backslash delimiters, if any are present.

    val -- DICOM bytestring to split up
    valtype -- default str, but can be e.g. UID to convert to a specific type

    Returns a single converted value, or a MultiValue of them.
    """
    # Drop the single trailing blank used to pad to even length; also a
    # trailing NUL, an error seen in PET files being converted (2005.05.25).
    if val and (val.endswith(b' ') or val.endswith(b'\x00')):
        val = val[:-1]
    if in_py3 and isinstance(val, bytes):
        val = val.decode(default_encoding)
    pieces = val.split("\\")
    converted = [valtype(piece) if piece else piece for piece in pieces]
    if len(converted) == 1:
        return converted[0]
    return MultiValue(valtype, converted)
class PersonNameBase(object):
    """Base class for Person Name classes.

    NOTE(review): parse() calls self.split(), so this mixin relies on the
    concrete subclass also deriving from str/unicode -- confirm when subclassing.
    """
    def __init__(self, val):
        """Initialize the PN properties"""
        # Note normally use __new__ on subclassing an immutable, but here we just want
        # to do some pre-processing for properties
        # PS 3.5-2008 section 6.2 (p.28) and 6.2.1 describes PN. Briefly:
        # single-byte-characters=ideographic characters=phonetic-characters
        # (each with?):
        # family-name-complex^Given-name-complex^Middle-name^name-prefix^name-suffix
        self.parse()

    def formatted(self, format_str):
        """Return a formatted string according to the format pattern

        Use "...%(property)...%(property)..." where property is one of
        family_name, given_name, middle_name, name_prefix, name_suffix
        """
        return format_str % self.__dict__

    def parse(self):
        """Break down the components and name parts"""
        # Components are separated by '='; at most three per the standard
        self.components = self.split("=")
        nComponents = len(self.components)
        self.single_byte = self.components[0]
        self.ideographic = ''
        self.phonetic = ''
        if nComponents > 1:
            self.ideographic = self.components[1]
        if nComponents > 2:
            self.phonetic = self.components[2]
        if self.single_byte:
            # pad with '^' in case missing trailing items are left out
            name_string = self.single_byte + "^^^^"
            parts = name_string.split("^")[:5]
            (self.family_name, self.given_name, self.middle_name,
             self.name_prefix, self.name_suffix) = parts
        else:
            # empty name: all five parts blank
            (self.family_name, self.given_name, self.middle_name,
             self.name_prefix, self.name_suffix) = ('', '', '', '', '')
class PersonName(PersonNameBase, str):
    """Human-friendly class to hold VR of Person Name (PN)

    Name is parsed into the following properties:
    single-byte, ideographic, and phonetic components (PS3.5-2008 6.2.1)
    family_name,
    given_name,
    middle_name,
    name_prefix,
    name_suffix
    """
    def __new__(cls, val):
        """Return instance of the new class"""
        # Check if trying to convert a string that has already been converted;
        # if so, return it unchanged rather than re-wrapping
        if isinstance(val, PersonName):
            return val
        return super(PersonName, cls).__new__(cls, val)

    def family_comma_given(self):
        """Return name as 'Family-name, Given-name'"""
        return self.formatted("%(family_name)s, %(given_name)s")
    # def __str__(self):
    #     return str(self.byte_string)
    #     XXX need to process the ideographic or phonetic components?
    # def __len__(self):
    #     return len(self.byte_string)
class PersonNameUnicode(PersonNameBase, unicode):
    """Unicode version of Person Name"""

    def __new__(cls, val, encodings):
        """Return unicode string after conversion of each part

        val -- the PN value to store
        encodings -- a list of python encodings, generally found
                from dicom.charset.python_encodings mapping
                of values in DICOM data element (0008,0005).
        """
        from dicom.charset import clean_escseq  # in here to avoid circular import
        # Make the possible three character encodings explicit:
        if not isinstance(encodings, list):
            encodings = [encodings] * 3
        if len(encodings) == 2:
            encodings.append(encodings[1])
        components = val.split("=")
        # Remove the first encoding if only one component is present
        if (len(components) == 1):
            del encodings[0]
        unicomponents = [clean_escseq(
            unicode(components[i], encodings[i]), encodings)
            for i, component in enumerate(components)]
        new_val = u"=".join(unicomponents)
        return unicode.__new__(cls, new_val)

    def __init__(self, val, encodings):
        self.encodings = encodings  # kept for later re-encoding on write
        PersonNameBase.__init__(self, val)

    def family_comma_given(self):
        """Return name as 'Family-name, Given-name'"""
        # Bug fix: '%(...)u' is an *integer* conversion and raises TypeError
        # for string fields; use '%(...)s' to match PersonName.family_comma_given.
        return self.formatted("%(family_name)s, %(given_name)s")
| Python |
# charset.py
"""Handle alternate character sets for character strings."""
#
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
import logging
logger = logging.getLogger('pydicom')
from dicom.valuerep import PersonNameUnicode, text_VRs
# Map DICOM Specific Character Set to python equivalent
python_encoding = {
'': 'iso8859', # default character set for DICOM
'ISO_IR 6': 'iso8859', # alias for latin_1 too
'ISO_IR 100': 'latin_1',
'ISO 2022 IR 87': 'iso2022_jp',
'ISO 2022 IR 13': 'shift_jis',
'ISO 2022 IR 149': 'euc_kr', # needs cleanup via clean_escseq()
'ISO_IR 192': 'UTF8', # from Chinese example, 2008 PS3.5 Annex J p1-4
'GB18030': 'GB18030',
'ISO_IR 126': 'iso_ir_126', # Greek
'ISO_IR 127': 'iso_ir_127', # Arab
'ISO_IR 138': 'iso_ir_138', # Hebrew
'ISO_IR 144': 'iso_ir_144', # Russian
}
default_encoding = "iso8859"
def clean_escseq(element, encodings):
    """Strip the ISO 2022 IR 149 (Korean) escape sequences that Python's
    codec leaves behind because of the G1 code element.

    Returns *element* unchanged for any other encoding list.
    """
    if 'euc_kr' not in encodings:
        return element
    without_g1 = element.replace("\x1b\x24\x29\x43", "")
    return without_g1.replace("\x1b\x28\x42", "")
# DICOM PS3.5-2008 6.1.1 (p 18) says:
# default is ISO-IR 6 G0, equiv to common chr set of ISO 8859 (PS3.5 6.1.2.1)
# (0008,0005) value 1 can *replace* the default encoding...
# for VRs of SH, LO, ST, LT, PN and UT (PS3.5 6.1.2.3)...
# with a single-byte character encoding
# if (0008,0005) is multi-valued, then value 1 (or default if blank)...
# is used until code extension escape sequence is hit,
# which can be at start of string, or after CR/LF, FF, or
# in Person Name PN, after ^ or =
# NOTE also that 7.5.3 SEQUENCE INHERITANCE states that if (0008,0005)
# is not present in a sequence item then it is inherited from its parent.
def decode(data_element, dicom_character_set):
    """Apply the DICOM character encoding to the data element (in place).

    data_element -- DataElement instance containing a value to convert
    dicom_character_set -- the value of Specific Character Set (0008,0005),
                    which may be a single value,
                    a multiple value (code extension), or
                    may also be '' or None.
                    If blank or None, ISO_IR 6 is used.

    NOTE(review): when a list is passed, its first element may be overwritten
    with "ISO_IR 6" -- the caller's list is mutated.
    """
    if not dicom_character_set:
        dicom_character_set = ['ISO_IR 6']
    # Detect list-like input via duck typing (EAFP)
    have_character_set_list = True
    try:
        dicom_character_set.append  # check if is list-like object
    except AttributeError:
        have_character_set_list = False
    if have_character_set_list:
        if not dicom_character_set[0]:
            # blank value 1 means the default repertoire
            dicom_character_set[0] = "ISO_IR 6"
    else:
        dicom_character_set = [dicom_character_set]
    encodings = [python_encoding[x] for x in dicom_character_set]
    # Pad the encodings list out to three (one per PN component group)
    if len(encodings) == 1:
        encodings = [encodings[0]]*3
    if len(encodings) == 2:
        encodings.append(encodings[1])
    # decode the string value to unicode
    # PN is special case as may have 3 components with differenct chr sets
    if data_element.VR == "PN":
        # logger.warn("%s ... type: %s" %(str(data_element), type(data_element.VR)))
        if data_element.VM == 1:
            data_element.value = PersonNameUnicode(data_element.value, encodings)
        else:
            data_element.value = [PersonNameUnicode(value, encodings)
                                  for value in data_element.value]
    # not elif: PN is never in text_VRs, so no double handling occurs
    if data_element.VR in text_VRs:
        # Remove the first encoding if this is a multi-byte encoding
        if len(encodings) > 1:
            del encodings[0]
        if data_element.VM == 1:
            data_element.value = clean_escseq(
                data_element.value.decode(
                    encodings[0]), encodings)
        else:
            data_element.value = [clean_escseq(
                value.decode(encodings[0]), encodings)
                for value in data_element.value]
| Python |
# datadict.py
# -*- coding: utf-8 -*-
"""Access dicom dictionary information"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
import sys
import logging
logger = logging.getLogger("pydicom")
from dicom.tag import Tag
from dicom._dicom_dict import DicomDictionary # the actual dict of {tag: (VR, VM, name, is_retired, keyword), ...}
from dicom._dicom_dict import RepeatersDictionary # those with tags like "(50xx, 0005)"
from dicom._private_dict import private_dictionaries
import warnings
from dicom import in_py3
# Generate mask dict for checking repeating groups etc.
# Map a true bitwise mask to the DICOM mask with "x"'s in it.
masks = {}
for mask_x in RepeatersDictionary:
    # mask1 is XOR'd to see that all non-"x" bits are identical (XOR result = 0 if bits same)
    # then AND those out with 0 bits at the "x" ("we don't care") location using mask2
    mask1 = long(mask_x.replace("x", "0"), 16)  # 'long': Python 2 only
    # "F0"[c=="x"]: 'F' (keep) for literal hex digits, '0' (ignore) for 'x'
    mask2 = long("".join(["F0"[c == "x"] for c in mask_x]), 16)
    masks[mask_x] = (mask1, mask2)
# For shorter naming of dicom member elements, put an entry here
# (longer naming can also still be used)
# The descriptive name must start with the long version (not replaced if internal)
shortNames = [("BeamLimitingDevice", "BLD"),
              ("RTBeamLimitingDevice", "RTBLD"),
              ("ControlPoint", "CP"),
              ("Referenced", "Refd")
              ]
def mask_match(tag):
    """Return the "x"-mask key matching *tag*, or None when no repeater fits."""
    for mask_x, (match_bits, care_bits) in masks.items():
        # XOR zeroes identical bits; AND keeps only the positions we care about
        if (tag ^ match_bits) & care_bits == 0:
            return mask_x
    return None
def get_entry(tag):
    """Return the tuple (VR, VM, name, is_retired, keyword) for *tag*.

    Falls back to the repeater ("masked") dictionary for tags such as 50xx;
    raises KeyError when neither dictionary has the tag.
    """
    tag = Tag(tag)
    if tag in DicomDictionary:
        return DicomDictionary[tag]
    mask_x = mask_match(tag)
    if mask_x:
        return RepeatersDictionary[mask_x]
    raise KeyError("Tag {0} not found in DICOM dictionary".format(tag))
def dictionary_description(tag):
    """Return the descriptive name for the given dicom tag."""
    entry = get_entry(tag)
    return entry[2]
def dictionaryVM(tag):
    """Return the dicom value multiplicity (VM) for the given dicom tag."""
    entry = get_entry(tag)
    return entry[1]
def dictionaryVR(tag):
    """Return the dicom value representation (VR) for the given dicom tag."""
    entry = get_entry(tag)
    return entry[0]
def dictionary_has_tag(tag):
    """Return True if the main DICOM dictionary has an entry for *tag*."""
    return tag in DicomDictionary
def dictionary_keyword(tag):
    """Return the official DICOM standard (since 2011) keyword for the tag."""
    entry = get_entry(tag)
    return entry[4]
# Set up a translation table for "cleaning" DICOM descriptions
# for backwards compatibility pydicom < 0.9.7 (before DICOM keywords)
# Translation is different with unicode - see .translate() at
# http://docs.python.org/library/stdtypes.html#string-methods
chars_to_remove = r""" !@#$%^&*(),;:.?\|{}[]+-="'’/"""
if in_py3:  # i.e. unicode strings
    # py3 str.translate takes a {codepoint: None} map to delete characters
    translate_table = dict((ord(char), None) for char in chars_to_remove)
else:
    import string
    # py2: identity table; chars_to_remove is passed separately as the
    # deletechars argument at the call site (see CleanName)
    translate_table = string.maketrans('', '')
def keyword_for_tag(tag):
    """Return the DICOM keyword for the given tag, or "" if unknown.

    Replaces old CleanName() method using the 2011 DICOM standard keywords.
    Will return GroupLength for group length tags.
    """
    try:
        return dictionary_keyword(tag)
    except KeyError:
        # unknown tag: empty string rather than an exception
        return ""
def CleanName(tag):
    """Return the dictionary descriptive text string but without bad characters.

    Used for e.g. *named tags* of Dataset instances (before DICOM keywords were
    part of the standard). Returns "" for unknown non-group-length tags.
    """
    tag = Tag(tag)
    if tag not in DicomDictionary:
        if tag.element == 0:  # 0=implied group length in DICOM versions < 3
            return "GroupLength"
        else:
            return ""
    s = dictionary_description(tag)  # Descriptive name in dictionary
    # remove blanks and nasty characters
    if in_py3:
        s = s.translate(translate_table)
    else:
        # py2: two-argument form -- identity table plus deletechars
        s = s.translate(translate_table, chars_to_remove)
    # Take "Sequence" out of name (pydicom < 0.9.7)
    # e..g "BeamSequence"->"Beams"; "ReferencedImageBoxSequence"->"ReferencedImageBoxes"
    # 'Other Patient ID' exists as single value AND as sequence so check for it and leave 'Sequence' in
    if dictionaryVR(tag) == "SQ" and not s.startswith("OtherPatientIDs"):
        if s.endswith("Sequence"):
            # pluralize: strip "Sequence", then fix irregular endings
            s = s[:-8]+"s"
            if s.endswith("ss"):
                s = s[:-1]
            if s.endswith("xs"):
                s = s[:-1] + "es"
            if s.endswith("Studys"):
                s = s[:-2]+"ies"
    return s
# Provide for the 'reverse' lookup. Given clean name, what is the tag?
logger.debug("Reversing DICOM dictionary so can look up tag from a name...")
# Old-style (pre-keyword) cleaned descriptive name -> tag
NameDict = dict([(CleanName(tag), tag) for tag in DicomDictionary])
# Official 2011+ DICOM keyword -> tag
keyword_dict = dict([(dictionary_keyword(tag), tag) for tag in DicomDictionary])
def short_name(name):
    """Return the abbreviated *named tag* for a long name.

    Returns "" when no abbreviation is registered in shortNames.
    """
    for full, abbrev in shortNames:
        if name.startswith(full):
            return name.replace(full, abbrev)
    return ""
def long_name(name):
    """Return the expanded *named tag* for a short name.

    Returns "" when the name does not start with a registered abbreviation.
    """
    for full, abbrev in shortNames:
        if name.startswith(abbrev):
            return name.replace(abbrev, full)
    return ""
def tag_for_name(name):
    """Return the dicom tag corresponding to name, or None if none exist.

    Accepts a 2011+ DICOM keyword, an old-style pydicom clean name
    (deprecated, warns), or a registered short-form name.
    """
    if name in keyword_dict:  # the usual case
        return keyword_dict[name]
    # If not an official keyword, check the old style pydicom names
    if name in NameDict:
        tag = NameDict[name]
        msg = ("'%s' as tag name has been deprecated; use official DICOM keyword '%s'"
               % (name, dictionary_keyword(tag)))
        warnings.warn(msg, DeprecationWarning)
        return tag
    # check if is short-form of a valid name
    longname = long_name(name)
    if longname:
        return NameDict.get(longname, None)
    return None
def all_names_for_tag(tag):
    """Return every known name for the tag: the keyword, plus its short form
    when one is registered."""
    longname = keyword_for_tag(tag)
    names = [longname]
    shortname = short_name(longname)
    if shortname:
        names.append(shortname)
    return names
# PRIVATE DICTIONARY handling
# functions in analogy with those of main DICOM dict
def get_private_entry(tag, private_creator):
    """Return the tuple (VR, VM, name, is_retired) from a private dictionary.

    Raises KeyError when the private creator or the tag is unknown.
    """
    tag = Tag(tag)
    try:
        private_dict = private_dictionaries[private_creator]
    except KeyError:
        raise KeyError("Private creator {0} not in private dictionary".format(private_creator))
    # private elements are usually agnostic for "block" (see PS3.5-2008 7.8.1 p44)
    # Some elements in _private_dict are explicit; most have "xx" for high-byte of element
    # Try exact key first, but then try with "xx" in block position
    try:
        dict_entry = private_dict[tag]
    except KeyError:
        # so here put in the "xx" in the block position for key to look up
        group_str = "%04x" % tag.group
        elem_str = "%04x" % tag.elem
        key = "%sxx%s" % (group_str, elem_str[-2:])
        if key not in private_dict:
            # NOTE(review): error reports the masked key, not the original tag
            raise KeyError("Tag {0} not in private dictionary for private creator {1}".format(key, private_creator))
        dict_entry = private_dict[key]
    return dict_entry
def private_dictionary_description(tag, private_creator):
    """Return the descriptive name for *tag* from the creator's private dict."""
    entry = get_private_entry(tag, private_creator)
    return entry[2]
def private_dictionaryVM(tag, private_creator):
    """Return the value multiplicity for *tag* from the creator's private dict."""
    entry = get_private_entry(tag, private_creator)
    return entry[1]
def private_dictionaryVR(tag, private_creator):
    """Return the value representation for *tag* from the creator's private dict."""
    entry = get_private_entry(tag, private_creator)
    return entry[0]
| Python |
# dataset.py
"""Module for Dataset class
Overview of Dicom object model:
Dataset(derived class of Python's dict class)
contains DataElement instances (DataElement is a class with tag, VR, value)
the value can be a Sequence instance
(Sequence is derived from Python's list),
or just a regular value like a number, string, etc.,
or a list of regular values, e.g. a 3d coordinate
Sequence's are a list of Datasets (note recursive nature here)
"""
#
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
import sys
from sys import byteorder
sys_is_little_endian = (byteorder == 'little')
import logging
logger = logging.getLogger('pydicom')
import inspect # for __dir__
from dicom.datadict import DicomDictionary, dictionaryVR
from dicom.datadict import tag_for_name, all_names_for_tag
from dicom.tag import Tag, BaseTag
from dicom.dataelem import DataElement, DataElement_from_raw, RawDataElement
from dicom.UID import NotCompressedPixelTransferSyntaxes
import os.path
import dicom # for write_file
import dicom.charset
import warnings
# Optional dependencies: record availability rather than failing at import.
have_numpy = True
try:
    import numpy
except ImportError:  # narrowed from bare 'except:': only a missing module is expected
    have_numpy = False
stat_available = True
try:
    from os import stat
except ImportError:
    stat_available = False
class PropertyError(Exception):
    # http://docs.python.org/release/3.1.3/tutorial/errors.html#tut-userexceptions
    """For AttributeErrors caught in a property, so do not go to __getattr__"""
    # Raised inside properties instead of AttributeError so that the class's
    # attribute-lookup fallback machinery is not silently triggered.
    pass
class Dataset(dict):
"""A collection (dictionary) of Dicom `DataElement` instances.
Example of two ways to retrieve or set values:
1. dataset[0x10, 0x10].value --> patient's name
2. dataset.PatientName --> patient's name
Example (2) uses DICOM "keywords", defined starting in 2011 standard.
PatientName is not actually a member of the object, but unknown member
requests are checked against the DICOM dictionary. If the name matches a
DicomDictionary descriptive string, the corresponding tag is used
to look up or set the `DataElement` instance's value.
:attribute indent_chars: for string display, the characters used to indent
nested Data Elements (e.g. sequence items). Default is three spaces.
"""
indent_chars = " "
    def add(self, data_element):
        """Equivalent to dataset[data_element.tag] = data_element.

        The element's own tag is used as the dict key.
        """
        self[data_element.tag] = data_element
def add_new(self, tag, VR, value):
"""Create a new DataElement instance and add it to this Dataset."""
data_element = DataElement(tag, VR, value)
# use data_element.tag since DataElement verified it
self[data_element.tag] = data_element
def data_element(self, name):
"""Return the full data_element instance for the given descriptive name
:param name: a DICOM keyword
:returns: a DataElement instance in this dataset with the given name
if the tag for that name is not found, returns None
"""
tag = tag_for_name(name)
if tag:
return self[tag]
return None
def __contains__(self, name):
"""Extend dict.__contains__() to handle DICOM keywords.
This is called for code like: ``if 'SliceLocation' in dataset``.
"""
if isinstance(name, (str, unicode)):
tag = tag_for_name(name)
else:
try:
tag = Tag(name)
except:
return False
if tag:
return dict.__contains__(self, tag)
else:
return dict.__contains__(self, name) # will no doubt raise an exception
def decode(self):
"""Apply character set decoding to all data elements.
See DICOM PS3.5-2008 6.1.1.
"""
# Find specific character set. 'ISO_IR 6' is default
# May be multi-valued, but let dicom.charset handle all logic on that
dicom_character_set = self.get('SpecificCharacterSet', "ISO_IR 6")
# Shortcut to the decode function in dicom.charset
decode_data_element = dicom.charset.decode
# Callback for walk(), to decode the chr strings if necessary
# This simply calls the dicom.charset.decode function
def decode_callback(ds, data_element):
decode_data_element(data_element, dicom_character_set)
# Use the walk function to go through all elements and convert them
self.walk(decode_callback)
def __delattr__(self, name):
"""Intercept requests to delete an attribute by name, e.g. del ds.name
If name is a dicom descriptive string (cleaned with CleanName),
then delete the corresponding tag and data_element.
Else, delete an instance (python) attribute as any other class would do
"""
# First check if is a valid DICOM name and if we have that data element
tag = tag_for_name(name)
if tag and tag in self:
del self[tag]
# If not a DICOM name in this dataset, check for regular instance name
# can't do delete directly, that will call __delattr__ again
elif name in self.__dict__:
del self.__dict__[name]
# Not found, raise an error in same style as python does
else:
raise AttributeError(name)
def __dir__(self):
"""Give a list of attributes available in the dataset
List of attributes is used, for example, in auto-completion in editors
or command-line environments.
"""
meths = set(zip(*inspect.getmembers(Dataset, inspect.isroutine))[0])
props = set(zip(
*inspect.getmembers(Dataset, inspect.isdatadescriptor))[0])
dicom_names = set(self.dir())
alldir=sorted(props | meths | dicom_names)
return alldir
def dir(self, *filters):
"""Return an alphabetical list of data_element keywords in the dataset.
Intended mainly for use in interactive Python sessions.
:param filters: zero or more string arguments to the function. Used for
case-insensitive match to any part of the DICOM name.
:returns: All data_element names in this dataset matching the filters.
If no filters, return all DICOM keywords in the dataset
"""
allnames = []
for tag, data_element in self.items():
allnames.extend(all_names_for_tag(tag))
# remove blanks - tags without valid names (e.g. private tags)
allnames = [x for x in allnames if x]
# Store found names in a dict, so duplicate names appear only once
matches = {}
for filter_ in filters:
filter_ = filter_.lower()
match = [x for x in allnames if x.lower().find(filter_) != -1]
matches.update(dict([(x,1) for x in match]))
if filters:
names = sorted(matches.keys())
return names
else:
return sorted(allnames)
def get(self, key, default=None):
"""Extend dict.get() to handle DICOM keywords"""
if isinstance(key, (str, unicode)):
try:
return getattr(self, key)
except AttributeError:
return default
else:
# is not a string, try to make it into a tag and then hand it
# off to the underlying dict
if not isinstance(key, BaseTag):
try:
key = Tag(key)
except:
raise TypeError("Dataset.get key must be a string or tag")
try:
return_val = self.__getitem__(key)
except KeyError:
return_val = default
return return_val
def __getattr__(self, name):
"""Intercept requests for unknown Dataset python-attribute names.
If the name matches a Dicom keyword,
return the value for the data_element with the corresponding tag.
"""
# __getattr__ only called if instance cannot find name in self.__dict__
# So, if name is not a dicom string, then is an error
tag = tag_for_name(name)
if tag is None:
raise AttributeError("Dataset does not have attribute "
"'{0:s}'.".format(name))
tag = Tag(tag)
if tag not in self:
raise AttributeError("Dataset does not have attribute "
"'{0:s}'.".format(name))
else: # do have that dicom data_element
return self[tag].value
def __getitem__(self, key):
"""Operator for dataset[key] request."""
tag = Tag(key)
data_elem = dict.__getitem__(self, tag)
if isinstance(data_elem, DataElement):
return data_elem
elif isinstance(data_elem, tuple):
# If a deferred read, then go get the value now
if data_elem.value is None:
from dicom.filereader import read_deferred_data_element
data_elem = read_deferred_data_element(self.fileobj_type,
self.filename, self.timestamp, data_elem)
# Not converted from raw form read from file yet; do so now
self[tag] = DataElement_from_raw(data_elem)
return dict.__getitem__(self, tag)
def group_dataset(self, group):
"""Return a Dataset containing only data_elements of a certain group.
:param group: the group part of a dicom (group, element) tag.
:returns: a dataset instance containing data elements of the group
specified
"""
ds = Dataset()
ds.update(dict([(tag,data_element) for tag,data_element in self.items()
if tag.group==group]))
return ds
def __iter__(self):
"""Method to iterate through the dataset, returning data_elements.
e.g.:
for data_element in dataset:
do_something...
The data_elements are returned in DICOM order,
i.e. in increasing order by tag value.
Sequence items are returned as a single data_element; it is up to the
calling code to recurse into the Sequence items if desired
"""
# Note this is different than the underlying dict class,
# which returns the key of the key:value mapping.
# Here the value is returned (but data_element.tag has the key)
taglist = sorted(self.keys())
for tag in taglist:
yield self[tag]
def _pixel_data_numpy(self):
"""Return a NumPy array of the pixel data.
NumPy is a numerical package for python. It is used if available.
:raises TypeError: if no pixel data in this dataset.
:raises ImportError: if cannot import numpy.
"""
if not 'PixelData' in self:
raise TypeError("No pixel data found in this dataset.")
if not have_numpy:
msg = "The Numpy package is required to use pixel_array, and numpy could not be imported.\n"
raise ImportError(msg)
# determine the type used for the array
need_byteswap = (self.is_little_endian != sys_is_little_endian)
# Make NumPy format code, e.g. "uint16", "int32" etc
# from two pieces of info:
# self.PixelRepresentation -- 0 for unsigned, 1 for signed;
# self.BitsAllocated -- 8, 16, or 32
format_str = '%sint%d' % (('u', '')[self.PixelRepresentation],
self.BitsAllocated)
try:
numpy_format = numpy.dtype(format_str)
except TypeError:
raise TypeError("Data type not understood by NumPy: "
"format='%s', PixelRepresentation=%d, BitsAllocated=%d" % (
numpy_format, self.PixelRepresentation, self.BitsAllocated))
# Have correct Numpy format, so create the NumPy array
arr = numpy.fromstring(self.PixelData, numpy_format)
# XXX byte swap - may later handle this in read_file!!?
if need_byteswap:
arr.byteswap(True) # True means swap in-place, don't make a new copy
# Note the following reshape operations return a new *view* onto arr, but don't copy the data
if 'NumberOfFrames' in self and self.NumberOfFrames > 1:
if self.SamplesPerPixel > 1:
arr = arr.reshape(self.SamplesPerPixel, self.NumberOfFrames, self.Rows, self.Columns)
else:
arr = arr.reshape(self.NumberOfFrames, self.Rows, self.Columns)
else:
if self.SamplesPerPixel > 1:
if self.BitsAllocated == 8:
arr = arr.reshape(self.SamplesPerPixel, self.Rows, self.Columns)
else:
raise NotImplementedError("This code only handles SamplesPerPixel > 1 if Bits Allocated = 8")
else:
arr = arr.reshape(self.Rows, self.Columns)
return arr
# Use by pixel_array property
def _get_pixel_array(self):
# Check if pixel data is in a form we know how to make into an array
# XXX uses file_meta here, should really only be thus for FileDataset
if self.file_meta.TransferSyntaxUID not in NotCompressedPixelTransferSyntaxes :
raise NotImplementedError("Pixel Data is compressed in a format pydicom does not yet handle. Cannot return array")
# Check if already have converted to a NumPy array
# Also check if self.PixelData has changed. If so, get new NumPy array
already_have = True
if not hasattr(self, "_pixel_array"):
already_have = False
elif self._pixel_id != id(self.PixelData):
already_have = False
if not already_have:
self._pixel_array = self._pixel_data_numpy()
self._pixel_id = id(self.PixelData) # is this guaranteed to work if memory is re-used??
return self._pixel_array
@property
def pixel_array(self):
"""Return the pixel data as a NumPy array"""
try:
return self._get_pixel_array()
except AttributeError:
t, e, tb = sys.exc_info()
raise PropertyError("AttributeError in pixel_array property: " + \
e.args[0]), None, tb
# Format strings spec'd according to python string formatting options
# See http://docs.python.org/library/stdtypes.html#string-formatting-operations
default_element_format = "%(tag)s %(name)-35.35s %(VR)s: %(repval)s"
default_sequence_element_format = "%(tag)s %(name)-35.35s %(VR)s: %(repval)s"
def formatted_lines(self, element_format=default_element_format,
sequence_element_format=default_sequence_element_format,
indent_format=None):
"""A generator to give back a formatted string representing each line
one at a time. Example:
for line in dataset.formatted_lines("%(name)s=%(repval)s", "SQ:%(name)s=%(repval)s"):
print(line)
See the source code for default values which illustrate some of the names that can be used in the
format strings
indent_format -- not used in current version. Placeholder for future functionality.
"""
for data_element in self.iterall():
# Get all the attributes possible for this data element (e.g.
# gets descriptive text name too)
# This is the dictionary of names that can be used in the format string
elem_dict = dict([(x, getattr(data_element,x)()
if callable(getattr(data_element, x))
else getattr(data_element, x))
for x in dir(data_element) if not x.startswith("_")])
if data_element.VR == "SQ":
yield sequence_element_format % elem_dict
else:
yield element_format % elem_dict
def _pretty_str(self, indent=0, topLevelOnly=False):
"""Return a string of the data_elements in this dataset, with indented levels.
This private method is called by the __str__() method
for handling print statements or str(dataset), and the __repr__() method.
It is also used by top(), which is the reason for the topLevelOnly flag.
This function recurses, with increasing indentation levels.
"""
strings = []
indentStr = self.indent_chars * indent
nextIndentStr = self.indent_chars *(indent+1)
for data_element in self:
if data_element.VR == "SQ": # a sequence
strings.append(indentStr + str(data_element.tag) + " %s %i item(s) ---- " % ( data_element.description(),len(data_element.value)))
if not topLevelOnly:
for dataset in data_element.value:
strings.append(dataset._pretty_str(indent+1))
strings.append(nextIndentStr + "---------")
else:
strings.append(indentStr + repr(data_element))
return "\n".join(strings)
def remove_private_tags(self):
"""Remove all Dicom private tags in this dataset and those contained within."""
def RemoveCallback(dataset, data_element):
"""Internal method to use as callback to walk() method."""
if data_element.tag.is_private:
# can't del self[tag] - won't be right dataset on recursion
del dataset[data_element.tag]
self.walk(RemoveCallback)
def save_as(self, filename, WriteLikeOriginal=True):
"""Write the dataset to a file.
filename -- full path and filename to save the file to
WriteLikeOriginal -- see dicom.filewriter.write_file for info on this parameter.
"""
dicom.write_file(filename, self, WriteLikeOriginal)
def __setattr__(self, name, value):
"""Intercept any attempts to set a value for an instance attribute.
If name is a dicom descriptive string (cleaned with CleanName),
then set the corresponding tag and data_element.
Else, set an instance (python) attribute as any other class would do.
"""
tag = tag_for_name(name)
if tag is not None: # successfully mapped name to a tag
if tag not in self: # don't have this tag yet->create the data_element instance
VR = dictionaryVR(tag)
data_element = DataElement(tag, VR, value)
else: # already have this data_element, just changing its value
data_element = self[tag]
data_element.value = value
# Now have data_element - store it in this dict
self[tag] = data_element
else: # name not in dicom dictionary - setting a non-dicom instance attribute
# XXX note if user mis-spells a dicom data_element - no error!!!
self.__dict__[name] = value
def __setitem__(self, key, value):
"""Operator for dataset[key]=value. Check consistency, and deal with private tags"""
if not isinstance(value, (DataElement, RawDataElement)): # ok if is subclass, e.g. DeferredDataElement
raise TypeError("Dataset contents must be DataElement instances.\n" + \
"To set a data_element value use data_element.value=val")
tag = Tag(value.tag)
if key != tag:
raise ValueError("data_element.tag must match the dictionary key")
data_element = value
if tag.is_private:
# See PS 3.5-2008 section 7.8.1 (p. 44) for how blocks are reserved
logger.debug("Setting private tag %r" % tag)
private_block = tag.elem >> 8
private_creator_tag = Tag(tag.group, private_block)
if private_creator_tag in self and tag != private_creator_tag:
if isinstance(data_element, RawDataElement):
data_element = DataElement_from_raw(data_element)
data_element.private_creator = self[private_creator_tag].value
dict.__setitem__(self, tag, data_element)
def __str__(self):
"""Handle str(dataset)."""
return self._pretty_str()
def top(self):
"""Show the DICOM tags, but only the top level; do not recurse into Sequences"""
return self._pretty_str(topLevelOnly=True)
def trait_names(self):
"""Return a list of valid names for auto-completion code
Used in IPython, so that data element names can be found
and offered for autocompletion on the IPython command line
"""
return dir(self) # only valid python >=2.6, else use self.__dir__()
def update(self, dictionary):
"""Extend dict.update() to handle DICOM keywords."""
for key, value in dictionary.items():
if isinstance(key, (str, unicode)):
setattr(self, key, value)
else:
self[Tag(key)] = value
def iterall(self):
"""Iterate through the dataset, yielding all data elements.
Unlike Dataset.__iter__, this *does* recurse into sequences,
and so returns all data elements as if the file were "flattened".
"""
for data_element in self:
yield data_element
if data_element.VR == "SQ":
sequence = data_element.value
for dataset in sequence:
for elem in dataset.iterall():
yield elem
def walk(self, callback):
"""Call the given function for all dataset data_elements (recurses).
Visit all data_elements, recurse into sequences and their datasets,
The callback function is called for each data_element
(including SQ element).
Can be used to perform an operation on certain types of data_elements.
E.g., `remove_private_tags`() finds all private tags and deletes them.
:param callback: a callable taking two arguments: a dataset, and
a data_element belonging to that dataset.
`DataElement`s will come back in DICOM order (by increasing tag number
within their dataset)
"""
taglist = sorted(self.keys())
for tag in taglist:
data_element = self[tag]
callback(self, data_element) # self = this Dataset
# 'tag in self' below needed in case callback deleted data_element
if tag in self and data_element.VR == "SQ":
sequence = data_element.value
for dataset in sequence:
dataset.walk(callback)
__repr__ = __str__
class FileDataset(Dataset):
    """A Dataset read from (or destined for) a DICOM file, carrying the
    file-level context: preamble, file meta info, transfer-syntax flags,
    and the originating file's name/type/timestamp (used for deferred reads).
    """

    def __init__(self, filename_or_obj, dataset, preamble=None, file_meta=None,
                 is_implicit_VR=True, is_little_endian=True):
        """Initialize a dataset read from a DICOM file

        :param filename_or_obj: full path and filename to the file. Use None if is a BytesIO.
        :param dataset: some form of dictionary, usually a Dataset from read_dataset()
        :param preamble: the 128-byte DICOM preamble
        :param file_meta: the file meta info dataset, as returned by _read_file_meta,
                or an empty dataset if no file meta information is in the file
        :param is_implicit_VR: True if implicit VR transfer syntax used; False if explicit VR. Default is True.
        :param is_little_endian: True if little-endian transfer syntax used; False if big-endian. Default is True.
        """
        Dataset.__init__(self, dataset)
        self.preamble = preamble
        self.file_meta = file_meta
        self.is_implicit_VR = is_implicit_VR
        self.is_little_endian = is_little_endian
        # NOTE: `basestring` and the `file` type are Python 2-only
        if isinstance(filename_or_obj, basestring):
            self.filename = filename_or_obj
            self.fileobj_type = file
        else:
            self.fileobj_type = filename_or_obj.__class__  # use __class__ python <2.7?; http://docs.python.org/reference/datamodel.html
            if getattr(filename_or_obj, "name", False):
                self.filename = filename_or_obj.name
            elif getattr(filename_or_obj, "filename", False):  # gzip python <2.7?
                self.filename = filename_or_obj.filename
            else:
                self.filename = None  # e.g. came from BytesIO or something file-like
        # timestamp lets deferred reads detect if the file changed on disk;
        # stat_available is a module-level flag set where `stat` is imported
        self.timestamp = None
        if stat_available and self.filename and os.path.exists(self.filename):
            statinfo = stat(self.filename)
            self.timestamp = statinfo.st_mtime
| Python |
# tag.py
"""Define Tag class to hold a dicom (group, element) tag"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Store the 4 bytes of a dicom tag as an arbitrary length integer
# (python "long" in python <3; "int" for python >=3).
# NOTE: This must not be stored as a tuple internally, as some code logic
# (e.g. in write_AT of filewriter) checks if a value is a multi-value element
# So, represent as a single number and separate to (group, element) when necessary.
def Tag(arg, arg2=None):
"""General function for creating a Tag in any of the standard forms:
e.g. Tag(0x00100010), Tag(0x10,0x10), Tag((0x10, 0x10))
"""
if arg2 is not None:
arg = (arg, arg2) # act as if was passed a single tuple
if isinstance(arg, (tuple, list)):
if len(arg) != 2:
raise ValueError("Tag must be an int or a 2-tuple")
if isinstance(arg[0], (str, unicode)): # py2to3: unicode not needed in py3
if not isinstance(arg[1], (str, unicode)): # py3: ditto
raise ValueError("Both arguments must be hex strings if one is")
arg = (int(arg[0], 16), int(arg[1], 16))
if arg[0] > 0xFFFF or arg[1] > 0xFFFF:
raise OverflowError("Groups and elements of tags must each be <=2 byte integers")
long_value = (arg[0] << 16) | arg[1]
elif isinstance(arg, (str, unicode)): # py2to3: unicode not needed in pure py3
raise ValueError("Tags cannot be instantiated from a single string")
else: # given a single number to use as a tag, as if (group, elem) already joined to a long
long_value = arg
if long_value > 0xFFFFFFFFL:
raise OverflowError("Tags are limited to 32-bit length; tag {0!r}".format(arg))
return BaseTag(long_value)
# py2to3: for some reason, the BaseTag class derived directly from long below
# was not converted by 2to3, but conversion does work with this next line
BaseTag_base_class = long  # converted to "int" by 2to3
class BaseTag(BaseTag_base_class):
    """Class for storing the dicom (group, element) tag

    Stored as a single integer (py2 `long`) holding group << 16 | element.
    """
    # Override comparisons so can convert "other" to Tag as necessary
    # See Ordering Comparisons at http://docs.python.org/dev/3.0/whatsnew/3.0.html

    def __lt__(self, other):
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return long(self) < long(other)

    def __eq__(self, other):
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return long(self) == long(other)

    def __ne__(self, other):
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return long(self) != long(other)

    # For python 3, any override of __cmp__ or __eq__ immutable requires
    # explicit redirect of hash function to the parent class
    # See http://docs.python.org/dev/3.0/reference/datamodel.html#object.__hash__
    __hash__ = long.__hash__

    def __str__(self):
        """String of tag value as (gggg, eeee)"""
        return "({0:04x}, {1:04x})".format(self.group, self.element)
    __repr__ = __str__

    @property
    def group(self):
        # group is the high 16 bits of the combined value
        return self >> 16

    @property
    def element(self):
        """Return the element part of the (group,element) tag"""
        return self & 0xffff
    elem = element  # alternate syntax

    @property
    def is_private(self):
        """Return a boolean to indicate whether the tag is a private tag (odd group number)"""
        return self.group % 2 == 1
def TupleTag(group_elem):
    """Fast factory for BaseTag object with known safe (group, element) tuple"""
    # No validation: caller guarantees a well-formed (group, element) pair
    group, elem = group_elem
    return BaseTag((group << 16) | elem)
# Define some special tags:
# See PS 3.5-2008 section 7.5 (p.40)
ItemTag = TupleTag((0xFFFE, 0xE000))  # start of Sequence Item
ItemDelimiterTag = TupleTag((0xFFFE, 0xE00D))  # end of Sequence Item
SequenceDelimiterTag = TupleTag((0xFFFE, 0xE0DD))  # end of Sequence of undefined length
| Python |
# fileutil.py
"""Functions for reading to certain bytes, e.g. delimiters"""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import pack, unpack
from dicom.tag import TupleTag, Tag
from dicom.datadict import dictionary_description
import logging
logger = logging.getLogger('pydicom')
def absorb_delimiter_item(fp, is_little_endian, delimiter):
    """Read (and ignore) undefined length sequence or item terminators."""
    fmt = "<HHL" if is_little_endian else ">HHL"
    group, elem, length = unpack(fmt, fp.read(8))
    tag = TupleTag((group, elem))
    if tag != delimiter:
        # Not the delimiter we expected: log it and rewind so the caller
        # can re-read these 8 bytes.
        msg = "Did not find expected delimiter '%s'" % dictionary_description(delimiter)
        msg += ", instead found %s at file position 0x%x" % (str(tag), fp.tell() - 8)
        logger.warn(msg)
        fp.seek(fp.tell() - 8)
        return
    logger.debug("%04x: Found Delimiter '%s'", fp.tell() - 8, dictionary_description(delimiter))
    # The delimiter's 4-byte length field should always be zero
    if length == 0:
        logger.debug("%04x: Read 0 bytes after delimiter", fp.tell() - 4)
    else:
        logger.debug("%04x: Expected 0x00000000 after delimiter, found 0x%x", fp.tell() - 4, length)
def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
    """Read through the file until a specific byte sequence is found.

    bytes_to_find -- a string containing the bytes to find. Must be in correct
                     endian order already
    read_size -- number of bytes to read at a time
    rewind -- if True, restore the original file position before returning
    Returns the absolute file offset of the first match, or None if the
    sequence is not found before end of file.
    """
    start_pos = fp.tell()
    # re-read this many trailing bytes so a match straddling two chunks is seen
    overlap = len(bytes_to_find) - 1
    while True:
        chunk_pos = fp.tell()
        chunk = fp.read(read_size)
        at_eof = False
        if len(chunk) < read_size:
            # try once more - if still short, this is the final block
            chunk += fp.read(read_size - len(chunk))
            at_eof = len(chunk) < read_size
        offset = chunk.find(bytes_to_find)
        if offset != -1:
            if rewind:
                fp.seek(start_pos)
            return chunk_pos + offset
        if at_eof:
            if rewind:
                fp.seek(start_pos)
            return None
        # step back a little in case the pattern crossed the chunk boundary
        fp.seek(fp.tell() - overlap)
def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=None,
                                read_size=128):
    """Read until the delimiter tag found and return the value, ignore the delimiter

    fp -- a file-like object with read(), seek() functions
    is_little_endian -- True if file transfer syntax is little endian, else False
    delimiter_tag -- tag (with .group and .elem) marking the end of the value
    defer_size -- if not None and the value is at least this many bytes,
        return None instead of the value (the caller re-reads it later)
    read_size -- number of bytes to read at one time (default 128)
    On completion, the file will be set to the first byte after the delimiter and its
    following four zero bytes.
    If end-of-file is hit before the delimiter was found, raises EOFError
    """
    data_start = fp.tell()
    # The 4-byte delimiter may straddle a chunk boundary, so re-read the last
    # 3 bytes of the previous chunk on every retry
    search_rewind = 3
    if is_little_endian:
        bytes_format = b"<HH"
    else:
        bytes_format = b">HH"
    bytes_to_find = pack(bytes_format, delimiter_tag.group, delimiter_tag.elem)
    found = False
    EOF = False
    value_chunks = []
    byte_count = 0  # for defer_size checks
    while not found:
        chunk_start = fp.tell()
        bytes_read = fp.read(read_size)
        if len(bytes_read) < read_size:
            # try again - if still don't get required amount, this is last block
            new_bytes = fp.read(read_size - len(bytes_read))
            bytes_read += new_bytes
            if len(bytes_read) < read_size:
                EOF = True  # but will still check whatever we did get
        index = bytes_read.find(bytes_to_find)
        if index != -1:
            found = True
            new_bytes = bytes_read[:index]
            byte_count += len(new_bytes)
            if defer_size is None or byte_count < defer_size:
                value_chunks.append(new_bytes)
            fp.seek(chunk_start + index + 4)  # rewind to end of delimiter
            length = fp.read(4)
            if length != b"\0\0\0\0":
                msg = "Expected 4 zero bytes after undefined length delimiter at pos {0:04x}"
                logger.error(msg.format(fp.tell() - 4))
        elif EOF:
            fp.seek(data_start)
            raise EOFError("End of file reached before delimiter {0!r} found".format(delimiter_tag))
        else:
            fp.seek(fp.tell() - search_rewind)  # rewind a bit in case delimiter crossed read_size boundary
            # accumulate the bytes read (not including the rewind)
            new_bytes = bytes_read[:-search_rewind]
            byte_count += len(new_bytes)
            if defer_size is None or byte_count < defer_size:
                value_chunks.append(new_bytes)
    # BUG FIX: was `defer_size >= defer_size` (always True), so any non-None
    # defer_size caused None to be returned regardless of the value's size.
    # Defer only when the accumulated value is at least defer_size bytes.
    if defer_size is not None and byte_count >= defer_size:
        return None
    else:
        return b"".join(value_chunks)
def find_delimiter(fp, delimiter, is_little_endian, read_size=128, rewind=True):
    """Return file position where 4-byte delimiter is located.

    Return None if reach end of file without finding the delimiter.
    On return, file position will be where it was before this function,
    unless rewind argument is False.
    """
    fmt = ">H"
    if is_little_endian:
        fmt = "<H"
    delimiter = Tag(delimiter)
    # Pack group then element as two 2-byte values in file byte order
    target = pack(fmt, delimiter.group) + pack(fmt, delimiter.elem)
    return find_bytes(fp, target, rewind=rewind)
def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128, rewind=True):
    """Search through the file to find the delimiter, return the length of the data
    element.

    delimiter -- the 4-byte delimiter tag marking the end of the element
    rewind -- if True, file will be returned to position before seeking the bytes
    :returns: number of bytes from the current file position to the delimiter
    :raises EOFError: if the delimiter is not found before end of file
    Note the data element that the delimiter starts is not read here, the calling
    routine must handle that.
    """
    # (removed unused local `chunk = 0`)
    data_start = fp.tell()
    delimiter_pos = find_delimiter(fp, delimiter, is_little_endian, rewind=rewind)
    if delimiter_pos is None:
        # Previously fell through to an opaque TypeError (None - int);
        # raise a meaningful error instead.
        raise EOFError("End of file reached before delimiter {0!r} found".format(delimiter))
    return delimiter_pos - data_start
def read_delimiter_item(fp, delimiter):
    """Read and ignore an expected delimiter.

    fp -- a DicomIO-style file object (provides read() and read_UL())
    delimiter -- the raw 4-byte string expected at the current position
    If the delimiter is not found or correctly formed, a warning is logged.
    """
    found = fp.read(4)
    if found != delimiter:
        # fixed typo in message: "delimitor" -> "delimiter"
        logger.warn("Expected delimiter %s, got %s at file position 0x%x", Tag(delimiter), Tag(found), fp.tell() - 4)
    length = fp.read_UL()
    if length != 0:
        logger.warn("Expected delimiter item to have length 0, got %d at file position 0x%x", length, fp.tell() - 4)
| Python |
# encaps.py
"""Routines for working with encapsulated (compressed) data
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Encapsulated Pixel Data -- 3.5-2008 A.4
# Encapsulated Pixel data is in a number of Items (start with Item tag (0xFFFE,E000) and ending ultimately with SQ delimiter and Item Length field of 0 (no value),
# just like SQ of undefined length, but here Item must have explicit length.
# PixelData length is Undefined Length if encapsulated
# First item is an Offset Table. It can have 0 length and no value, or it can have a table of US pointers to first byte of the Item tag starting each *Frame*,
# where 0 of pointer is at first Item tag following the Offset table
# If a single frame, it may be 0 length/no value, or it may have a single pointer (0).
import logging
logger = logging.getLogger('pydicom')
from dicom.filebase import DicomBytesIO
from dicom.tag import ItemTag, SequenceDelimiterTag
def defragment_data(data):
    """Read encapsulated data and return one continuous string

    data -- string of encapsulated data, typically dataset.PixelData
    Return all fragments concatenated together as a byte string
    If PixelData has multiple frames, then should separate out before calling this routine.
    """
    # Wrap the raw bytes in a DICOM-aware file-like object
    fp = DicomBytesIO(data)
    fp.is_little_endian = True  # DICOM standard requires this
    # First item is the Basic Offset Table; read it and discard
    basic_offset_table = read_item(fp)
    fragments = []
    item = read_item(fp)
    while item:  # read_item returns None at the Sequence Delimiter
        fragments.append(item)
        item = read_item(fp)
    return "".join(fragments)
# read_item modeled after filereader.ReadSequenceItem
def read_item(fp):
"""Read and return a single Item in the fragmented data stream"""
try:
tag = fp.read_tag()
except EOFError: # already read delimiter before passing data here, so should just run out
return None
if tag == SequenceDelimiterTag: # No more items, time for sequence to stop reading
length = fp.read_UL()
logger.debug("%04x: Sequence Delimiter, length 0x%x", fp.tell()-8, length)
if length != 0:
logger.warning("Expected 0x00000000 after delimiter, found 0x%x, at data position 0x%x", length, fp.tell()-4)
return None
if tag != ItemTag:
logger.warning("Expected Item with tag %s at data position 0x%x", ItemTag, fp.tell()-4)
length = fp.read_UL()
else:
length = fp.read_UL()
logger.debug("%04x: Item, length 0x%x", fp.tell()-8, length)
if length == 0xFFFFFFFFL:
raise ValueError("Encapsulated data fragment had Undefined Length at data position 0x%x" % fp.tell()-4)
item_data = fp.read(length)
return item_data
| Python |
#!/usr/bin/python
""" dicom_dao
Data Access Objects for persisting PyDicom DataSet objects.
Currently we support couchdb through the DicomCouch class.
Limitations:
- Private tags are discarded
TODO:
- Unit tests with multiple objects open at a time
- Unit tests with rtstruct objects
- Support for mongodb (mongo has more direct support for binary data)
Dependencies:
- PyDicom
- python-couchdb
- simplejson
Tested with:
- PyDicom 0.9.4-1
- python-couchdb 0.6
- couchdb 0.10.1
- simplejson 2.0.9
"""
#
# Copyright (c) 2010 Michael Wallace
# This file is released under the pydicom license.
# See the file license.txt included with the pydicom distribution, also
# available at http://pydicom.googlecode.com
#
import hashlib
import os
import string
import simplejson
import couchdb
import dicom
def uid2str(uid):
    """ Convert PyDicom uid to a string """
    # repr() of a UID gives a quoted form, e.g. "'1.2.3'"; drop the quotes
    text = repr(uid)
    return text.strip("'")
# When reading files a VR of 'US or SS' is left as binary, because we
# don't know how to interpret the values as numbers of either type. We
# therefore treat it as binary and will continue to until either pydicom
# works it out for us, or we figure out a test.
BINARY_VR_VALUES = ['OW', 'OB', 'OW/OB', 'US or SS']
class DicomCouch(dict):
    """ A Data Access Object for persisting PyDicom objects into CouchDB

    We follow the same pattern as the python-couchdb library for getting and
    setting documents, for example storing dicom.dataset.Dataset object dcm:
        db = DicomCouch('http://localhost:5984/', 'dbname')
        db[dcm.SeriesInstanceUID] = dcm

    The only constraints on the key are that it must be json-serializable and
    unique within the database instance. In theory it should be possible to
    use any DICOM UID. Unfortunately I have written this code under the
    assumption that SeriesInstanceUID will always be used. This will be fixed.

    Retrieving object with key 'foo':
        dcm = db['foo']

    Deleting object with key 'foo':
        dcm = db['foo']
        db.delete(dcm)

    TODO:
     - It is possible to have couchdb assign a uid when adding objects. This
       should be supported.
    """

    def __init__(self, server, db):
        """ Create connection to couchdb server/db, creating db if missing """
        super(DicomCouch, self).__init__()
        # Per-series bookkeeping: the couch doc (for _rev/_id/_attachments)
        # and md5 hashes of each binary attachment already stored.
        self._meta = {}
        server = couchdb.Server(server)
        try:
            self._db = server[db]
        except couchdb.client.ResourceNotFound:
            self._db = server.create(db)

    def __getitem__(self, key):
        """ Retrieve DICOM object with specified SeriesInstanceUID """
        doc = self._db[key]
        dcm = json2pydicom(doc)
        if dcm.SeriesInstanceUID not in self._meta:
            self._meta[dcm.SeriesInstanceUID] = {}
            self._meta[dcm.SeriesInstanceUID]['hashes'] = {}
        if '_attachments' in doc:
            self.__get_attachments(dcm, doc)
        _set_meta_info_dcm(dcm)
        # Keep a copy of the couch doc for use in DELETE operations
        self._meta[dcm.SeriesInstanceUID]['doc'] = doc
        return dcm

    def __setitem__(self, key, dcm):
        """ Write the supplied DICOM object to the database """
        try:
            dcm.PixelData = dcm.pixel_array.tostring()
        except AttributeError:
            pass  # Silently ignore errors due to pixel_array not existing
        except NotImplementedError:
            pass  # Silently ignore attempts to modify compressed pixel data
        except TypeError:
            pass  # Silently ignore errors due to PixelData not existing
        jsn, binary_elements, file_meta_binary_elements = pydicom2json(dcm)
        # Binary values live in couch attachments, so drop them from the
        # json tree rather than storing empty tags twice.
        _strip_elements(jsn, binary_elements)
        _strip_elements(jsn['file_meta'], file_meta_binary_elements)
        if dcm.SeriesInstanceUID in self._meta:
            self.__set_meta_info_jsn(jsn, dcm)
        try:  # Actually write to the db
            self._db[key] = jsn
        except TypeError as type_error:
            # Ignore only this specific spurious error; previously ANY
            # TypeError was silently swallowed here, hiding real failures.
            if str(type_error) != 'string indices must be integers, not str':
                raise
        if dcm.SeriesInstanceUID not in self._meta:
            self._meta[dcm.SeriesInstanceUID] = {}
            self._meta[dcm.SeriesInstanceUID]['hashes'] = {}
        self.__put_attachments(dcm, binary_elements, jsn)
        # Get a local copy of the document
        # We get this from couch because we get the _id, _rev and _attachments
        # keys which will ensure we don't overwrite the attachments we just
        # uploaded.
        # I don't really like the extra HTTP GET and I think we can generate
        # what we need without doing it. Don't have time to work out how yet.
        self._meta[dcm.SeriesInstanceUID]['doc'] = \
            self._db[dcm.SeriesInstanceUID]

    def __str__(self):
        """ Return the string representation of the couchdb client """
        return str(self._db)

    def __repr__(self):
        """ Return the canonical string representation of the couchdb client """
        return repr(self._db)

    def __get_attachments(self, dcm, doc):
        """ Set binary tags by retrieving attachments from couchdb.

        Values are hashed so they are only updated if they have changed.
        """
        for attachment_id in doc['_attachments'].keys():
            # Attachment ids are ':'-joined tag addresses (see _tagstack2id)
            tagstack = attachment_id.split(':')
            value = self._db.get_attachment(doc['_id'], attachment_id)
            _add_element(dcm, tagstack, value)
            self._meta[dcm.SeriesInstanceUID]['hashes'][attachment_id] = \
                hashlib.md5(value)

    def __put_attachments(self, dcm, binary_elements, jsn):
        """ Upload all new and modified attachments """
        elements_to_update = \
            [(tagstack, item) for tagstack, item in binary_elements
             if self.__attachment_update_needed(
                 dcm, _tagstack2id(tagstack + [item.tag]), item)]
        for tagstack, element in elements_to_update:
            attachment_id = _tagstack2id(tagstack + [element.tag])
            self._db.put_attachment(jsn, element.value, attachment_id)
            self._meta[dcm.SeriesInstanceUID]['hashes'][attachment_id] = \
                hashlib.md5(element.value)

    def delete(self, dcm):
        """ Delete from database and remove meta info from the DAO """
        self._db.delete(self._meta[dcm.SeriesInstanceUID]['doc'])
        self._meta.pop(dcm.SeriesInstanceUID)

    def __set_meta_info_jsn(self, jsn, dcm):
        """ Set the couch-specific meta data for supplied dict """
        jsn['_rev'] = self._meta[dcm.SeriesInstanceUID]['doc']['_rev']
        if '_attachments' in self._meta[dcm.SeriesInstanceUID]['doc']:
            jsn['_attachments'] = \
                self._meta[dcm.SeriesInstanceUID]['doc']['_attachments']

    def __attachment_update_needed(self, dcm, attachment_id, binary_element):
        """ Compare hashes for binary element and return true if different """
        try:
            hashes = self._meta[dcm.SeriesInstanceUID]['hashes']
        except KeyError:
            return True  # If no hashes dict then attachments do not exist
        if attachment_id not in hashes or hashes[attachment_id].digest() != \
                hashlib.md5(binary_element.value).digest():
            return True
        else:
            return False
def _add_element(dcm, tagstack, value):
    """ Add element with tag, vr and value to dcm at location tagstack """
    # Walk down to the node that owns the element; tagstack entries are
    # either sequence indices (plain ints) or string-formatted tags.
    node = dcm
    for step in tagstack[:-1]:
        try:
            address = int(step)
        except ValueError:
            address = dicom.tag.Tag(__str2tag(step))
        node = node[address]
    tag = __str2tag(tagstack[-1])
    vr = dicom.datadict.dictionaryVR(tag)
    node[tag] = dicom.dataelem.DataElement(tag, vr, value)
def _tagstack2id(tagstack):
""" Convert a list of tags to a unique (within document) attachment id """
return string.join([str(tag) for tag in tagstack], ':')
def _strip_elements(jsn, elements):
""" Remove supplied elements from the dict object
We use this with a list of binary elements so that we don't store
empty tags in couchdb when we are already storing the binary data as
attachments.
"""
for tagstack, element in elements:
if len(tagstack) == 0:
jsn.pop(element.tag)
else:
current_node = jsn
for tag in tagstack:
current_node = current_node[tag]
current_node.pop(element.tag)
def _set_meta_info_dcm(dcm):
    """ Set the file metadata DataSet attributes

    This is done by PyDicom when we dicom.read_file(foo) but we need to do it
    ourselves when creating a DataSet from scratch, otherwise we cannot use
    foo.pixel_array or dicom.write_file(foo).
    This code is lifted from PyDicom.
    """
    syntax = dcm.file_meta.TransferSyntaxUID
    # Default to Explicit VR Little Endian: that covers ExplicitVRLittleEndian
    # itself, DeflatedExplicitVRLittleEndian, and any other syntax -- all
    # Encapsulated (JPEG etc) transfer syntaxes are ExplVR-LE by
    # Standard PS 3.5-2008 A.4 (p63). Only the two cases below differ.
    dcm.is_implicit_vr = False
    dcm.is_little_endian = True
    if syntax == dicom.UID.ImplicitVRLittleEndian:
        dcm.is_implicit_vr = True
    elif syntax == dicom.UID.ExplicitVRBigEndian:
        dcm.is_little_endian = False
def pydicom2json(dcm):
    """ Convert the supplied PyDicom object into a json-serializable dict

    Binary elements cannot be represented in json so we return these as
    as separate list of the tuple (tagstack, element), where:
     - element  = dicom.dataelem.DataElement
     - tagstack = list of tags/sequence IDs that address the element

    The tagstack variable means we know the absolute address of each binary
    element. We then use this as the attachment id in couchdb - when we
    retrieve the attachment we can then insert it at the appropriate point in
    the tree.
    """
    dcm.remove_private_tags()  # No support for now
    dcm.decode()  # Convert to unicode
    binary_elements = []
    tagstack = []
    jsn = {}
    for key in dcm.keys():
        jsn[key] = __jsonify(dcm[key], binary_elements, tagstack)
    # The file meta dataset is converted separately so its binary elements
    # can be stripped/attached independently of the main dataset's.
    file_meta_binary_elements = []
    file_meta_jsn = {}
    for key in dcm.file_meta.keys():
        file_meta_jsn[key] = __jsonify(dcm.file_meta[key],
                                       file_meta_binary_elements, tagstack)
    jsn['file_meta'] = file_meta_jsn
    return jsn, binary_elements, file_meta_binary_elements
def __jsonify(element, binary_elements, tagstack):
    """ Convert key, value to json-serializable types

    Recursive, so if value is key/value pairs then all children will get
    converted
    """
    value = element.value
    if element.VR in BINARY_VR_VALUES:
        # Binary content becomes a couch attachment: remember where it lives
        # and keep an empty placeholder in the json tree.
        binary_elements.append((tagstack[:], element))
        return ''
    if type(value) == list:
        return [__typemap(item) for item in value]
    if type(value) != dicom.sequence.Sequence:
        return __typemap(value)
    # A sequence of datasets: recurse into each one, tracking our absolute
    # address on the shared tagstack.
    tagstack.append(element.tag)
    nested_data = []
    for index, subdataset in enumerate(value):
        tagstack.append(index)
        converted = {}
        for subkey in subdataset.keys():
            converted[subkey] = __jsonify(subdataset[subkey],
                                          binary_elements, tagstack)
        nested_data.append(converted)
        tagstack.pop()
    tagstack.pop()
    return nested_data
def __typemap(value):
    """ Map PyDicom types that won't serialise to JSON types """
    # Exact type check on purpose: UID subclasses str, and only real UID
    # instances should be converted through uid2str.
    if type(value) == dicom.UID.UID:
        return uid2str(value)
    if isinstance(value, dicom.tag.BaseTag):
        return long(value)
    return value
def json2pydicom(jsn):
    """ Convert the supplied json dict into a PyDicom object """
    # Couch bookkeeping keys and file_meta are not DICOM elements and are
    # handled separately.
    reserved = ['_rev', '_id', '_attachments', 'file_meta']
    dataset = dicom.dataset.Dataset()
    for key in jsn.keys():
        if key in reserved:
            continue
        dataset.add(__dicomify(key, jsn[key]))
    file_meta = dicom.dataset.Dataset()
    for key in jsn['file_meta']:
        file_meta.add(__dicomify(key, jsn['file_meta'][key]))
    dataset.file_meta = file_meta
    return dataset
def __dicomify(key, value):
    """ Convert a json key, value to a PyDicom DataElement """
    tag = __str2tag(key)
    if tag.element == 0:
        # Element 0000 of any group is the group length, always UL
        # (see filereader.py in pydicom).
        vr = 'UL'
    else:
        vr = dicom.datadict.dictionaryVR(tag)
    if vr == 'OW/OB':
        # Always write pixel data as bytes rather than words.
        vr = 'OB'
    if vr == 'US or SS':
        # The data is already decoded so the choice is ours: use US unless
        # the value needs a sign.
        vr = 'SS' if value < 0 else 'US'
    if vr != 'SQ':
        return dicom.dataelem.DataElement(tag, vr, value)
    # A sequence of datasets: recurse into each item.
    datasets = []
    for item in value:
        elements = [__dicomify(subkey, item[subkey])
                    for subkey in item.keys()]
        datasets.append(__make_dataset(elements))
    return dicom.dataelem.DataElement(tag, vr, dicom.sequence.Sequence(datasets))
def __make_dataset(data_elements):
    """ Build and return a Dataset holding the given DataElement objects """
    result = dicom.dataset.Dataset()
    for data_element in data_elements:
        result.add(data_element)
    return result
def __str2tag(key):
    """ Convert string representation of a tag into a Tag

    Expects keys shaped like "(0010, 0020)": characters 1-4 hold the hex
    group number and characters 7 through the closing paren the hex element.
    """
    group = int(key[1:5], 16)
    element = int(key[7:-1], 16)
    return dicom.tag.Tag((group, element))
if __name__ == '__main__':
    TESTDB = 'dicom_test'
    SERVER = 'http://127.0.0.1:5984'
    # Start from a clean slate: drop the test database if it already exists.
    couch = couchdb.Server(SERVER)
    try:
        couch.delete(TESTDB)
    except couchdb.client.ResourceNotFound:
        pass  # Don't worry if it didn't exist
    db = DicomCouch(SERVER, TESTDB)
    # Store every .dcm file from the test directory, keyed by series UID.
    testfiles_dir = '../testfiles'
    testfiles = [os.path.join(testfiles_dir, name)
                 for name in os.listdir(testfiles_dir)
                 if name.endswith('dcm')]
    for dcmfile in testfiles:
        dcm = dicom.read_file(dcmfile)
        db[dcm.SeriesInstanceUID] = dcm
| Python |
# pydicom_PIL.py
"""View DICOM images using Python image Library (PIL)
Usage:
>>> import dicom
>>> from dicom.contrib.pydicom_PIL import show_PIL
>>> ds = dicom.read_file("filename")
>>> show_PIL(ds)
Requires Numpy: http://numpy.scipy.org/
and Python Imaging Library: http://www.pythonware.com/products/pil/
"""
# Copyright (c) 2009 Darcy Mason, Adit Panchal
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Based on image.py from pydicom version 0.9.3,
# LUT code added by Adit Panchal
# Tested on Python 2.5.4 (32-bit) on Mac OS X 10.6
# using numpy 1.3.0 and PIL 1.1.7b1
# Probe the optional dependencies once and record availability flags,
# instead of failing at import time. Catch only ImportError -- a bare
# except would also hide KeyboardInterrupt/SystemExit and real bugs.
have_PIL = True
try:
    import PIL.Image
except ImportError:
    have_PIL = False

have_numpy = True
try:
    import numpy as np
except ImportError:
    have_numpy = False
# NOTE: the original file repeated the numpy guard twice and defined
# get_LUT_value twice with identical bodies; the duplicates are removed.


def get_LUT_value(data, window, level):
    """Apply the RGB Look-Up Table for the given data and window/level value.

    Maps values at or below (level - 0.5 - (window-1)/2) to 0, values above
    (level - 0.5 + (window-1)/2) to 255, and scales the values in between
    linearly across 0..255.
    """
    if not have_numpy:
        raise ImportError("Numpy is not available. See http://numpy.scipy.org/ to download and install")
    return np.piecewise(data,
                        [data <= (level - 0.5 - (window-1)/2),
                         data > (level - 0.5 + (window-1)/2)],
                        [0, 255, lambda data: ((data - (level - 0.5))/(window-1) + 0.5)*(255-0)])
# Display an image using the Python Imaging Library (PIL)
def show_PIL(dataset):
    """Display the dataset's pixel data in a PIL window.

    Applies the window/level LUT when both WindowWidth and WindowCenter
    are present; otherwise shows the raw pixel buffer directly.
    """
    if not have_PIL:
        raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
    if ('PixelData' not in dataset):
        raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
    have_window = ('WindowWidth' in dataset) and ('WindowCenter' in dataset)
    if have_window:
        image = get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
        # Convert mode to L since LUT has only 256 values:
        # http://www.pythonware.com/library/pil/handbook/image.htm
        im = PIL.Image.fromarray(image).convert('L')
    else:
        # Can only apply LUT if window values exist; fall back to raw modes.
        bits = dataset.BitsAllocated
        samples = dataset.SamplesPerPixel
        if bits == 8 and samples == 1:
            mode = "L"
        elif bits == 8 and samples == 3:
            mode = "RGB"
        elif bits == 16:
            # not sure about this -- PIL source says is 'experimental' and no
            # documentation. Also, should bytes swap depending on endian of
            # file and system??
            mode = "I;16"
        else:
            raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
        # PIL size = (width, height); recommended to specify all details by
        # http://www.pythonware.com/library/pil/handbook/image.htm
        size = (dataset.Columns, dataset.Rows)
        im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1)
    im.show()
| Python |
#pydicom_Tkinter.py
#
# Copyright (c) 2009 Daniel Nanz
# This file is released under the pydicom (http://code.google.com/p/pydicom/)
# license, see the file license.txt available at
# (http://code.google.com/p/pydicom/)
#
# revision history:
# Dec-08-2009: version 0.1
#
# 0.1: tested with pydicom version 0.9.3, Python version 2.6.2 (32-bit)
# under Windows XP Professional 2002, and Mac OS X 10.5.5,
# using numpy 1.3.0 and a small random selection of MRI and
# CT images.
'''
View DICOM images from pydicom
requires numpy: http://numpy.scipy.org/
Usage:
------
>>> import dicom # pydicom
>>> import dicom.contrib.pydicom_Tkinter as pydicom_Tkinter # this module
>>> df = dicom.read_file(filename)
>>> pydicom_Tkinter.show_image(df)
'''
import Tkinter
import tempfile
import os
have_numpy = True
try:
    import numpy as np
except ImportError:
    # Image handling below will not work without numpy; catch only
    # ImportError rather than a bare except that would hide real errors.
    have_numpy = False
def get_PGM_bytedata_string(arr):
    '''Given a 2D numpy array as input write gray-value image data in the PGM
    format into a byte string and return it.

    arr: single-byte unsigned int numpy array
    note: Tkinter's PhotoImage object seems to accept only single-byte data
    '''
    if arr.dtype != np.uint8:
        raise ValueError('arr must have dtype uint8')
    if len(arr.shape) != 2:
        raise ValueError('arr must be a 2D array')
    # array.shape is (#rows, #cols) tuple; PGM input needs this reversed.
    # A generator works on both Python 2 and 3 -- the original
    # reversed(map(...)) fails on Python 3, where map returns an iterator.
    col_row_string = ' '.join(str(dim) for dim in reversed(arr.shape))
    # Build the ASCII header and append the raw pixel bytes. tobytes()
    # replaces the deprecated tostring(), and encoding the header keeps the
    # result a valid byte string on Python 3 (plain str on Python 2, where
    # the output is byte-identical to the original '\n'.join form).
    header = '\n'.join(('P5',
                        col_row_string,
                        str(arr.max()),
                        ''))
    return header.encode('ascii') + arr.tobytes()
def get_PGM_from_numpy_arr(arr, window_center, window_width,
                           lut_min=0, lut_max=255):
    '''real-valued numpy input ->  PGM-image formatted byte string

    arr: real-valued numpy array to display as grayscale image
    window_center, window_width: to define max/min values to be mapped to the
                                 lookup-table range. WC/WW scaling is done
                                 according to DICOM-3 specifications.
    lut_min, lut_max: min/max values of (PGM-) grayscale table: do not change

    Note: when arr is already float64 it is modified in place.
    '''
    if np.isreal(arr).sum() != arr.size:
        raise ValueError
    # currently only support 8-bit colors
    if lut_max != 255:
        raise ValueError
    if arr.dtype != np.float64:
        arr = arr.astype(np.float64)
    # LUT-specific array scaling; width >= 1 per the DICOM standard.
    window_width = max(1, window_width)
    center = np.float64(window_center)
    width = np.float64(window_width)
    lut_range = np.float64(lut_max) - lut_min
    low = center - 0.5 - (width - 1.0) / 2.0
    high = center - 0.5 + (width - 1.0) / 2.0
    # Partition the values: at/below the window -> lut_min, at/above ->
    # lut_max, linear DICOM WC/WW scaling in between. (Assigning through an
    # empty mask is a harmless no-op, so no .any() guards are needed.)
    below = (arr <= low)
    inside = (arr > low) & (arr < high)
    above = (arr >= high)
    arr[below] = lut_min
    arr[inside] = ((arr[inside] - (center - 0.5)) /
                   (width - 1.0) + 0.5) * lut_range + lut_min
    arr[above] = lut_max
    # round to next integer values and convert to unsigned int
    arr = np.rint(arr).astype(np.uint8)
    # return PGM byte-data string
    return get_PGM_bytedata_string(arr)
def get_tkinter_photoimage_from_pydicom_image(data):
    '''
    Wrap data.pixel_array in a Tkinter PhotoImage instance,
    after conversion into a PGM grayscale image.

    This will fail if the "numpy" module is not installed in the attempt of
    creating the data.pixel_array.

    data:  object returned from pydicom.read_file()
    side effect: may leave a temporary .pgm file on disk
    '''
    # get numpy array as representation of image data
    arr = data.pixel_array.astype(np.float64)
    # pixel_array seems to be the original, non-rescaled array; window
    # center/width refer to rescaled values, so rescale when possible.
    if ('RescaleIntercept' in data) and ('RescaleSlope' in data):
        arr = data.RescaleSlope * arr + data.RescaleIntercept
    # default window: full value range of the array
    wc = (arr.max() + arr.min()) / 2.0
    ww = arr.max() - arr.min() + 1.0
    # overwrite with specific values from data, if available
    if ('WindowCenter' in data) and ('WindowWidth' in data):
        wc = data.WindowCenter
        ww = data.WindowWidth
        try:
            wc = wc[0]  # can be multiple values
        except:
            pass
        try:
            ww = ww[0]
        except:
            pass
    # scale array to account for center, width and PGM grayscale range,
    # and wrap into PGM formatted ((byte-) string
    pgm = get_PGM_from_numpy_arr(arr, wc, ww)
    # Tkinter.PhotoImage(data=pgm, gamma=1.0) fails for certain window
    # center/width values ("_tkinter.TclError: truncated PPM data" under
    # Windows, distorted images on OS X) for as yet unidentified reasons,
    # while reading the same PGM back from a temporary file always works --
    # so go through a temp file.
    (os_id, abs_path) = tempfile.mkstemp(suffix='.pgm')
    with open(abs_path, 'wb') as fd:
        fd.write(pgm)
    photo_image = Tkinter.PhotoImage(file=abs_path, gamma=1.0)
    # close and remove temporary file on disk
    # os.close is needed under windows for os.remove not to fail
    try:
        os.close(os_id)
        os.remove(abs_path)
    except:
        pass  # silently leave file on disk in temp-like directory
    return photo_image
def show_image(data, block=True, master=None):
    '''
    Get minimal Tkinter GUI and display a pydicom data.pixel_array

    data: object returned from pydicom.read_file()
    block: if True run Tk mainloop() to show the image
    master: use with block==False and an existing Tk widget as parent widget

    side effects: may leave a temporary .pgm file on disk
    '''
    frame = Tkinter.Frame(master=master, background='#000')
    if 'SeriesDescription' in data and 'InstanceNumber' in data:
        title = ', '.join(('Ser: ' + data.SeriesDescription,
                           'Img: ' + str(data.InstanceNumber)))
    else:
        title = 'pydicom image'
    frame.master.title(title)
    photo_image = get_tkinter_photoimage_from_pydicom_image(data)
    label = Tkinter.Label(frame, image=photo_image, background='#000')
    # keep a reference to avoid disappearance upon garbage collection
    label.photo_reference = photo_image
    label.grid()
    frame.grid()
    # Truth-test instead of the original "block==True" (PEP 8): any truthy
    # value now blocks, matching the documented intent of the parameter.
    if block:
        frame.mainloop()
| Python |
#==========================================================================
# imViewer-Simple.py
#
# An example program that opens uncompressed DICOM images and
# converts them via numPy and PIL to be viewed in wxWidgets GUI
# apps. The conversion is currently:
#
# pydicom->NumPy->PIL->wxPython.Image->wxPython.Bitmap
#
# Gruesome but it mostly works. Surely there is at least one
# of these steps that could be eliminated (probably PIL) but
# haven't tried that yet and I may want some of the PIL manipulation
# functions.
#
# This won't handle RLE, embedded JPEG-Lossy, JPEG-lossless,
# JPEG2000, old ACR/NEMA files, or anything weird. Also doesn't
# handle some RGB images that I tried.
#
# Have added Adit Panchal's LUT code. It helps a lot, but needs
# to be further generalized. Added test for window and/or level
# as 'list' type - crude, but it worked for a bunch of old MR and
# CT slices I have.
#
# Testing: minimal
# Tried only on WinXP sp2 using numpy 1.3.0
# and PIL 1.1.7b1, Python 2.6.4, and wxPython 2.8.10.1
#
# Dave Witten: Nov. 11, 2009
#==========================================================================
import os
import os.path
import sys
import dicom
import wx
# Probe optional dependencies; catch only ImportError so a bare except
# cannot hide KeyboardInterrupt/SystemExit or genuine errors.
have_PIL = True
try:
    import PIL.Image
except ImportError:
    have_PIL = False
have_numpy = True
try:
    import numpy as np
except ImportError:
    have_numpy = False
#----------------------------------------------------------------
# Initialize image capabilities.
#----------------------------------------------------------------
# NOTE(review): module-level side effect -- registers wx's image format
# handlers at import time so bitmaps can be created by the viewer below.
wx.InitAllImageHandlers()
#----------------------------------------------------------------
# MsgDlg()
#----------------------------------------------------------------
def MsgDlg(window, string, caption='OFAImage', style=wx.YES_NO|wx.CANCEL):
    """Show a common modal MessageDialog and return the user's choice."""
    dialog = wx.MessageDialog(window, string, caption, style)
    choice = dialog.ShowModal()
    dialog.Destroy()
    return choice
#=======================================================
# class ImFrame
#=======================================================
class ImFrame(wx.Frame):
    """Main window: DICOM element tree (left pane) and image view (right)."""

    #------------------------------------------------------------
    # ImFrame.__init__()
    #------------------------------------------------------------
    def __init__(self, parent, title):
        """Create the pydicom image example's main frame window."""
        wx.Frame.__init__(self,
                          parent,
                          id = -1,
                          title = "",
                          pos = wx.DefaultPosition,
                          size = wx.Size(w=1024, h=768),
                          style = wx.DEFAULT_FRAME_STYLE | wx.SUNKEN_BORDER | wx.CLIP_CHILDREN)
        #--------------------------------------------------------
        # Set up the menubar.
        #--------------------------------------------------------
        self.mainmenu = wx.MenuBar()
        # Make the 'File' menu.
        menu = wx.Menu()
        item = menu.Append(wx.ID_ANY, '&Open', 'Open file for editing')
        self.Bind(wx.EVT_MENU, self.OnFileOpen, item)
        item = menu.Append(wx.ID_ANY, 'E&xit', 'Exit Program')
        self.Bind(wx.EVT_MENU, self.OnFileExit, item)
        self.mainmenu.Append(menu, '&File')
        # Attach the menu bar to the window.
        self.SetMenuBar(self.mainmenu)
        #--------------------------------------------------------
        # Set up the main splitter window.
        #--------------------------------------------------------
        self.mainSplitter = wx.SplitterWindow(self, style=wx.NO_3D | wx.SP_3D)
        self.mainSplitter.SetMinimumPaneSize(1)
        # Dataset tree view on the left.
        self.dsTreeView = wx.TreeCtrl(self.mainSplitter, style=wx.TR_LINES_AT_ROOT | wx.TR_HAS_BUTTONS)
        # Image view panel on the right.
        self.imView = wx.Panel(self.mainSplitter, style=wx.VSCROLL | wx.HSCROLL | wx.CLIP_CHILDREN)
        self.imView.Bind(wx.EVT_PAINT, self.OnPaint)
        self.imView.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.imView.Bind(wx.EVT_SIZE, self.OnSize)
        # Install the splitter panes.
        self.mainSplitter.SplitVertically(self.dsTreeView, self.imView)
        self.mainSplitter.SetSashPosition(300, True)
        #--------------------------------------------------------
        # Initialize some values
        #--------------------------------------------------------
        self.dcmdsRoot = False      # tree root item, created on first load
        self.foldersRoot = False
        self.loadCentered = True    # center the bitmap inside the panel
        self.bitmap = None          # wx.Bitmap currently displayed, if any
        self.Show(True)

    #------------------------------------------------------------
    # ImFrame.OnFileExit()
    #------------------------------------------------------------
    def OnFileExit(self, event):
        """Exits the program."""
        self.Destroy()
        event.Skip()

    #------------------------------------------------------------
    # ImFrame.OnSize()
    #------------------------------------------------------------
    def OnSize(self, event):
        "Window 'size' event: repaint so the bitmap stays centered."
        self.Refresh()

    #------------------------------------------------------------
    # ImFrame.OnEraseBackground()
    #------------------------------------------------------------
    def OnEraseBackground(self, event):
        "Window 'erase background' event: intentionally empty (reduces flicker)."
        pass

    #------------------------------------------------------------
    # ImFrame.populateTree()
    #------------------------------------------------------------
    def populateTree(self, ds):
        """ Populate the tree in the left window with the [desired]
        dataset values"""
        if not self.dcmdsRoot:
            self.dcmdsRoot = self.dsTreeView.AddRoot(text="DICOM Objects")
        else:
            self.dsTreeView.DeleteChildren(self.dcmdsRoot)
        self.recurse_tree(ds, self.dcmdsRoot)
        self.dsTreeView.ExpandAll()

    #------------------------------------------------------------
    # ImFrame.recurse_tree()
    #------------------------------------------------------------
    def recurse_tree(self, ds, parent, hide=False):
        """ Append every data element of ds under tree node 'parent',
        recursing into sequence (SQ) items. """
        for data_element in ds:
            if isinstance(data_element.value, unicode):
                ip = self.dsTreeView.AppendItem(parent, text=unicode(data_element))
            else:
                ip = self.dsTreeView.AppendItem(parent, text=str(data_element))
            if data_element.VR == "SQ":
                for i, ds in enumerate(data_element.value):
                    sq_item_description = data_element.name.replace(" Sequence", "")
                    item_text = "%s %d" % (sq_item_description, i+1)
                    parentNodeID = self.dsTreeView.AppendItem(ip, text=item_text.rjust(128))
                    self.recurse_tree(ds, parentNodeID)

    ## --- Most of what is important happens below this line ---------------------

    #------------------------------------------------------------
    # ImFrame.OnFileOpen()
    #------------------------------------------------------------
    def OnFileOpen(self, event):
        """Opens a selected file."""
        dlg = wx.FileDialog(self, 'Choose a file to add.', '', '', '*.*', wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            fullPath = dlg.GetPath()
            imageFile = dlg.GetFilename()
            #checkDICMHeader()
            self.show_file(imageFile, fullPath)

    #------------------------------------------------------------
    # ImFrame.OnPaint()
    #------------------------------------------------------------
    def OnPaint(self, event):
        "Window 'paint' event: draw the hatched background, then the bitmap."
        dc = wx.PaintDC(self.imView)
        dc = wx.BufferedDC(dc)
        # paint a background just so it isn't *so* boring.
        dc.SetBackground(wx.Brush("WHITE"))
        dc.Clear()
        dc.SetBrush(wx.Brush("GREY", wx.CROSSDIAG_HATCH))
        windowsize = self.imView.GetSizeTuple()
        dc.DrawRectangle(0, 0, windowsize[0], windowsize[1])
        bmpX0 = 0
        bmpY0 = 0
        # "is not None" instead of "!= None" (identity check, PEP 8)
        if self.bitmap is not None:
            if self.loadCentered:
                bmpX0 = (windowsize[0] - self.bitmap.Width) / 2
                bmpY0 = (windowsize[1] - self.bitmap.Height) / 2
            dc.DrawBitmap(self.bitmap, bmpX0, bmpY0, False)

    #------------------------------------------------------------
    # ImFrame.ConvertWXToPIL()
    # Expropriated from Andrea Gavana's
    # ShapedButton.py in the wxPython dist
    #------------------------------------------------------------
    def ConvertWXToPIL(self, bmp):
        """ Convert wx.Image Into PIL Image. """
        width = bmp.GetWidth()
        height = bmp.GetHeight()
        # NOTE(review): despite the name/docstring this builds a wx.EmptyImage,
        # not a PIL image -- confirm against the original ShapedButton.py.
        # Unused by this viewer.
        im = wx.EmptyImage(width, height)
        im.fromarray("RGBA", (width, height), bmp.GetData())
        return im  # bug fix: previously "return img", an undefined name

    #------------------------------------------------------------
    # ImFrame.ConvertPILToWX()
    # Expropriated from Andrea Gavana's
    # ShapedButton.py in the wxPython dist
    #------------------------------------------------------------
    def ConvertPILToWX(self, pil, alpha=True):
        """ Convert PIL Image Into wx.Image. """
        if alpha:
            image = apply(wx.EmptyImage, pil.size)
            image.SetData(pil.convert("RGB").tostring())
            # Alpha bytes are every 4th byte of the RGBA string.
            image.SetAlphaData(pil.convert("RGBA").tostring()[3::4])
        else:
            image = wx.EmptyImage(pil.size[0], pil.size[1])
            new_image = pil.convert('RGB')
            data = new_image.tostring()
            image.SetData(data)
        return image

    #-----------------------------------------------------------
    # ImFrame.get_LUT_value()
    #-----------------------------------------------------------
    def get_LUT_value(self, data, window, level):
        """Apply the RGB Look-Up Table for the given data and window/level value."""
        if not have_numpy:
            raise ImportError("Numpy is not available. See http://numpy.scipy.org/ to download and install")
        # Window/level may be stored as multi-valued elements; use the first.
        if isinstance(window, list):
            window = window[0]
        if isinstance(level, list):
            level = level[0]
        return np.piecewise(
            data,
            [data <= (level - 0.5 - (window-1)/2), data > (level - 0.5 + (window-1)/2)],
            [0, 255, lambda data: ((data - (level - 0.5))/(window-1) + 0.5)*(255-0)]
        )

    #-----------------------------------------------------------
    # ImFrame.loadPIL_LUT(dataset)
    # Display an image using the Python Imaging Library (PIL)
    #-----------------------------------------------------------
    def loadPIL_LUT(self, dataset):
        """Return a PIL image for the dataset, window/levelled when possible."""
        if not have_PIL:
            raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
        if('PixelData' not in dataset):
            raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
        if('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):   # can only apply LUT if these values exist
            bits = dataset.BitsAllocated
            samples = dataset.SamplesPerPixel
            if bits == 8 and samples == 1:
                mode = "L"
            elif bits == 8 and samples == 3:
                mode = "RGB"
            elif bits == 16:                                                   # not sure about this -- PIL source says is 'experimental' and no documentation.
                mode = "I;16"                                                  # Also, should bytes swap depending on endian of file and system??
            else:
                raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
            size = (dataset.Columns, dataset.Rows)                             # PIL size = (width, height)
            im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1)  # Recommended to specify all details by http://www.pythonware.com/library/pil/handbook/image.htm
        else:
            image = self.get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
            im = PIL.Image.fromarray(image).convert('L')                       # Convert mode to L since LUT has only 256 values: http://www.pythonware.com/library/pil/handbook/image.htm
        return im

    #------------------------------------------------------------
    # ImFrame.show_file()
    #------------------------------------------------------------
    def show_file(self, imageFile, fullPath):
        """ Load the DICOM file, make sure it contains at least one
        image, and set it up for display by OnPaint().  ** be
        careful not to pass a unicode string to read_file or it will
        give you 'fp object does not have a defer_size attribute,
        or some such."""
        ds = dicom.read_file(str(fullPath))
        ds.decode()                         # change strings to unicode
        self.populateTree(ds)
        if 'PixelData' in ds:
            self.dImage = self.loadPIL_LUT(ds)
            if self.dImage is not None:     # "is not None" rather than "!= None"
                tmpImage = self.ConvertPILToWX(self.dImage, False)
                self.bitmap = wx.BitmapFromImage(tmpImage)
                self.Refresh()
##------ This is just the initialization of the App -------------------------
#=======================================================
# The main App Class.
#=======================================================
class App(wx.App):
    """Image Application."""

    #------------------------------------------------------------
    # App.OnInit()
    #------------------------------------------------------------
    def OnInit(self):
        """Create the Image Application's main frame."""
        # Keep the frame on the app object: the original bound it to a local
        # that went out of scope immediately, which relies on wx tracking
        # top-level windows itself. An explicit reference is clearer and
        # silences "unused variable" lint warnings.
        self.frame = ImFrame(None, 'wxImage Example')
        return True
#---------------------------------------------------------------------
# If this file is running as main or a standalone test, begin execution here.
#---------------------------------------------------------------------
if __name__ == '__main__':
    # App(0): 0 is wx.App's first ("redirect") argument -- presumably chosen
    # to keep stdout/stderr on the console rather than in a wx window;
    # confirm against the wxPython wx.App documentation.
    app = App(0)
    app.MainLoop()
| Python |
# dicom_series.py
"""
By calling the function read_files with a directory name or list
of files as an argument, a list of DicomSeries instances can be
obtained. A DicomSeries object has some attributes that give
information about the series (such as shape, sampling, suid) and
has an info attribute, which is a dicom.DataSet instance containing
information about the first dicom file in the series. The data can
be obtained using the get_pixel_array() method, which produces a
3D numpy array if there are multiple files in the series.
This module can deal with gated data, in which case a DicomSeries
instance is created for each 3D volume.
"""
#
# Copyright (c) 2010 Almar Klein
# This file is released under the pydicom license.
# See the file license.txt included with the pydicom distribution, also
# available at http://pydicom.googlecode.com
#
# I (Almar) performed some tests on loading a series of data
# in two different ways: loading all data, and deferring loading
# the data. Both ways seem equally fast on my system. I have to
# note that results can differ quite a lot depending on the system,
# but still I think this suggests that deferred reading is in
# general not slower. I think deferred loading of the pixel data
# can be advantageous because maybe not all data of all series
# is needed. Also it simply saves memory, because the data is
# removed from the Dataset instances.
# In the few results below, cold means reading for the first time,
# warm means reading the 2nd/3rd/etc. time.
# - Full loading of data, cold: 9 sec
# - Full loading of data, warm: 3 sec
# - Deferred loading of data, cold: 9 sec
# - Deferred loading of data, warm: 3 sec
import os, sys, time, gc
import dicom
from dicom.sequence import Sequence
# Try importing numpy
try:
import numpy as np
have_numpy = True
except Exception:
np = None
have_numpy = False
## Helper functions and classes
class ProgressBar:
    """Render a simple textual progress bar on stdout.

    The bar is drawn incrementally: each Update() call emits only the
    extra characters needed to reach the new fraction, so nothing else
    may be printed between Start() and Stop()/Finish() except via
    PrintMessage().
    """
    def __init__(self, char='-', length=20):
        self.char = char        # character used to fill the bar
        self.length = length    # number of fill characters at 100%
        self.progress = 0.0     # last fraction passed to Update()
        self.nbits = 0          # fill characters written so far
        self.what = ''          # label shown in front of the bar
    def Start(self, what=''):
        """ Start(what='')
        Begin a new bar, printing the given label first. Do not print
        anything yourself until after calling Finish(); use
        PrintMessage() to emit messages while the bar is active.
        """
        self.what = what
        self.progress = 0.0
        self.nbits = 0
        sys.stdout.write('%s [' % what)
    def Stop(self, message=""):
        """ Close the bar at its current position, padding the remainder
        with spaces. Optionally print a message behind it. """
        remaining = int(self.length - self.nbits)
        sys.stdout.write('%s] %s\n' % (' ' * remaining, message))
    def Finish(self, message=""):
        """ Complete the bar to 100% if it was not there already.
        Optionally print a message behind it. """
        remaining = int(self.length - self.nbits)
        sys.stdout.write('%s] %s\n' % (self.char * remaining, message))
    def Update(self, newProgress):
        """ Advance the bar to newProgress, a fraction between 0 and 1. """
        self.progress = newProgress
        wanted = int(self.length * newProgress - self.nbits)
        if wanted > 0:
            sys.stdout.write(self.char * wanted)
            self.nbits += wanted
    def PrintMessage(self, message):
        """ Print a message (for example a warning) behind the current
        bar, then start a fresh bar with the same label. """
        self.Stop(message)
        self.Start(self.what)
def _dummyProgressCallback(progress):
""" A callback to indicate progress that does nothing. """
pass
# Shared ProgressBar instance used by the default callback below.
_progressBar = ProgressBar()
def _progressCallback(progress):
    """ Default progress callback: draws a ProgressBar on stdout.

    A string argument starts a new bar, None finishes it (showing the
    elapsed time), and any other value is taken as a fraction in [0, 1].
    """
    if isinstance(progress, basestring):
        # A label: begin a new bar and remember when it started.
        _progressBar.Start(progress)
        _progressBar._t0 = time.time()
    elif progress is None:
        # Done: complete the bar and report the elapsed time.
        elapsed = time.time() - _progressBar._t0
        _progressBar.Finish('%2.2f seconds' % elapsed)
    else:
        _progressBar.Update(progress)
def _listFiles(files, path):
"""List all files in the directory, recursively. """
for item in os.listdir(path):
item = os.path.join(path, item)
if os.path.isdir( item ):
_listFiles(files, item)
else:
files.append(item)
def _splitSerieIfRequired(serie, series):
    """ _splitSerieIfRequired(serie, series)
    Split the serie in multiple series if this is required.
    The choice is based on examining the image position relative to
    the previous image. If it differs too much, it is assumed
    that there is a new dataset. This can happen for example in
    unsplit gated CT data. The new series replace 'serie' in place
    in the 'series' list, preserving its position.
    """
    # Sort the original list (by InstanceNumber) and get local name
    serie._sort()
    L = serie._datasets
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this: splitting needs slice positions
    if not "ImagePositionPatient" in ds1:
        return
    # Initialize a list of new lists; each sub-list becomes one serie
    L2 = [[ds1]]
    # Init slice distance estimate (0 means "no estimate yet")
    distance = 0
    for index in range(1,len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions (z-component of ImagePositionPatient)
        pos1 = ds1.ImagePositionPatient[2]
        pos2 = ds2.ImagePositionPatient[2]
        # Get distances
        newDist = abs(pos1 - pos2)
        #deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1*distance:
            # Start a new sub-serie and reset the distance estimate
            L2.append([])
            distance = 0
        else:
            # Test missing file (gap of ~1.5-2x the usual spacing)
            if distance and newDist > 1.5*distance:
                print 'Warning: missing file after "%s"' % ds1.filename
            distance = newDist
        # Add to last list
        L2[-1].append( ds2 )
        # Store previous
        ds1 = ds2
    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series, one per sub-list
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, serie._showProgress)
            newSerie._datasets = Sequence(L)
            series2insert.append(newSerie)
        # Insert series (reversed keeps them in order at index i) and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
# Tag of the (7FE0,0010) Pixel Data element; used below to fetch and
# restore the raw element so pixel data can be re-deferred after reading.
pixelDataTag = dicom.tag.Tag(0x7fe0, 0x0010)
def _getPixelDataFromDataset(ds):
""" Get the pixel data from the given dataset. If the data
was deferred, make it deferred again, so that memory is
preserved. Also applies RescaleSlope and RescaleIntercept
if available. """
# Get original element
el = dict.__getitem__(ds, pixelDataTag)
# Get data
data = ds.pixel_array
# Remove data (mark as deferred)
dict.__setitem__(ds, pixelDataTag, el)
del ds._pixel_array
# Obtain slope and offset
slope = 1
offset = 0
needFloats = False
needApplySlopeOffset = False
if 'RescaleSlope' in ds:
needApplySlopeOffset = True
slope = ds.RescaleSlope
if 'RescaleIntercept' in ds:
needApplySlopeOffset = True
offset = ds.RescaleIntercept
if int(slope)!= slope or int(offset) != offset:
needFloats = True
if not needFloats:
slope, offset = int(slope), int(offset)
# Apply slope and offset
if needApplySlopeOffset:
# Maybe we need to change the datatype?
if data.dtype in [np.float32, np.float64]:
pass
elif needFloats:
data = data.astype(np.float32)
else:
# Determine required range
minReq, maxReq = data.min(), data.max()
minReq = min([minReq, minReq*slope+offset, maxReq*slope+offset])
maxReq = max([maxReq, minReq*slope+offset, maxReq*slope+offset])
# Determine required datatype from that
dtype = None
if minReq<0:
# Signed integer type
maxReq = max([-minReq, maxReq])
if maxReq < 2**7:
dtype = np.int8
elif maxReq < 2**15:
dtype = np.int16
elif maxReq < 2**31:
dtype = np.int32
else:
dtype = np.float32
else:
# Unsigned integer type
if maxReq < 2**8:
dtype = np.int8
elif maxReq < 2**16:
dtype = np.int16
elif maxReq < 2**32:
dtype = np.int32
else:
dtype = np.float32
# Change datatype
if dtype != data.dtype:
data = data.astype(dtype)
# Apply slope and offset
data *= slope
data += offset
# Done
return data
## The public functions and classes
def read_files(path, showProgress=False, readPixelData=False):
    """ read_files(path, showProgress=False, readPixelData=False)
    Reads dicom files and returns a list of DicomSeries objects, which
    contain information about the data, and can be used to load the
    image or volume data.
    The parameter "path" can also be a list of files or directories.
    If the callable "showProgress" is given, it is called with a single
    argument to indicate the progress. The argument is a string when a
    progress is started (indicating what is processed). A float indicates
    progress updates. The parameter is None when the progress is finished.
    When "showProgress" is True, a default callback is used that writes
    to stdout. By default, no progress is shown.
    If readPixelData is True, the pixel data of all series is read. By
    default the loading of pixeldata is deferred until it is requested
    using the DicomSeries.get_pixel_array() method. In general, both
    methods should be equally fast.
    """
    # Init list of files
    files = []
    # Obtain data from the given path
    if isinstance(path, basestring):
        # Make dir nice
        basedir = os.path.abspath(path)
        # Check whether it exists
        if not os.path.isdir(basedir):
            raise ValueError('The given path is not a valid directory.')
        # Find files recursively
        _listFiles(files, basedir)
    elif isinstance(path, (tuple, list)):
        # Iterate over all elements, which can be files or directories
        for p in path:
            if os.path.isdir(p):
                _listFiles(files, os.path.abspath(p))
            elif os.path.isfile(p):
                files.append(p)
            else:
                print "Warning, the path '%s' is not valid." % p
    else:
        raise ValueError('The path argument must be a string or list.')
    # Set default progress callback? (anything non-callable becomes a no-op)
    if showProgress is True:
        showProgress = _progressCallback
    if not hasattr(showProgress, '__call__'):
        showProgress = _dummyProgressCallback
    # Set defer size: elements larger than this are not kept in memory
    deferSize = 16383 # 128**2-1
    if readPixelData:
        deferSize = None
    # Gather file data and put in DicomSeries, keyed by SeriesInstanceUID
    series = {}
    count = 0
    showProgress('Loading series information:')
    for filename in files:
        # Skip DICOMDIR files
        if filename.count("DICOMDIR"):
            continue
        # Try loading dicom ...
        try:
            dcm = dicom.read_file( filename, deferSize )
        except dicom.filereader.InvalidDicomError:
            continue # skip non-dicom file
        except Exception as why:
            # Other errors are reported but do not abort the scan
            if showProgress is _progressCallback:
                _progressBar.PrintMessage(str(why))
            else:
                print 'Warning:', why
            continue
        # Get SUID and register the file with an existing or new series object
        try:
            suid = dcm.SeriesInstanceUID
        except AttributeError:
            continue # some other kind of dicom file
        if suid not in series:
            series[suid] = DicomSeries(suid, showProgress)
        series[suid]._append(dcm)
        # Show progress (note that we always start with a 0.0)
        showProgress( float(count) / len(files) )
        count += 1
    # Finish progress
    showProgress( None )
    # Make a list and sort, so that the order is deterministic
    series = series.values()
    series.sort(key=lambda x:x.suid)
    # Split series if necessary (iterate a copy; the list is mutated)
    for serie in reversed([serie for serie in series]):
        _splitSerieIfRequired(serie, series)
    # Finish all series: compute shape/sampling/info, dropping series
    # that cannot form a volume
    showProgress('Analysing series')
    series_ = []
    for i in range(len(series)):
        try:
            series[i]._finish()
            series_.append(series[i])
        except Exception:
            pass # Skip serie (probably report-like file without pixels)
        showProgress(float(i+1)/len(series))
    showProgress(None)
    return series_
class DicomSeries(object):
    """ DicomSeries
    This class represents a serie of dicom files that belong together.
    If these are multiple files, they represent the slices of a volume
    (like for CT or MRI). The actual volume can be obtained using
    get_pixel_array(). Information about the data can be obtained using
    the info attribute.
    """
    # To create a DicomSeries object, start by making an instance and
    # append files using the "_append" method. When all files are
    # added, call "_sort" to sort the files, and then "_finish" to evaluate
    # the data, perform some checks, and set the shape and sampling
    # attributes of the instance.
    def __init__(self, suid, showProgress):
        # Init dataset list and the callback
        self._datasets = Sequence()
        self._showProgress = showProgress
        # Init props (shape/sampling/info are filled in by _finish())
        self._suid = suid
        self._info = None
        self._shape = None
        self._sampling = None
    @property
    def suid(self):
        """ The Series Instance UID. """
        return self._suid
    @property
    def shape(self):
        """ The shape of the data (nz, ny, nx).
        If None, the serie contains a single dicom file. """
        return self._shape
    @property
    def sampling(self):
        """ The sampling (voxel distances) of the data (dz, dy, dx).
        If None, the serie contains a single dicom file. """
        return self._sampling
    @property
    def info(self):
        """ A DataSet instance containing the information as present in the
        first dicomfile of this serie. """
        return self._info
    @property
    def description(self):
        """ A description of the dicom series. Used fields are
        PatientName, shape of the data, SeriesDescription,
        and ImageComments.
        """
        info = self.info
        # If no info available, return simple description
        if info is None:
            return "DicomSeries containing %i images" % len(self._datasets)
        fields = []
        # Give patient name (""+ coerces the PersonName value to a string)
        if 'PatientName' in info:
            fields.append(""+info.PatientName)
        # Also add dimensions
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append( 'x'.join(tmp) )
        # Try adding more fields
        if 'SeriesDescription' in info:
            fields.append("'"+info.SeriesDescription+"'")
        if 'ImageComments' in info:
            fields.append("'"+info.ImageComments+"'")
        # Combine
        return ' '.join(fields)
    def __repr__(self):
        adr = hex(id(self)).upper()
        return "<DicomSeries with %i images at %s>" % (len(self._datasets), adr)
    def get_pixel_array(self):
        """ get_pixel_array()
        Get (load) the data that this DicomSeries represents, and return
        it as a numpy array. If this serie contains multiple images, the
        resulting array is 3D, otherwise it's 2D.
        If RescaleSlope and RescaleIntercept are present in the dicom info,
        the data is rescaled using these parameters. The data type is chosen
        depending on the range of the (rescaled) data.
        """
        # Can we do this?
        if not have_numpy:
            msg = "The Numpy package is required to use get_pixel_array.\n"
            raise ImportError(msg)
        # It's easy if no file or if just a single file
        if len(self._datasets)==0:
            raise ValueError('Serie does not contain any files.')
        elif len(self._datasets)==1:
            ds = self._datasets[0]
            slice = _getPixelDataFromDataset( ds )
            return slice
        # Check info
        if self.info is None:
            raise RuntimeError("Cannot return volume if series not finished.")
        # Set callback to update progress
        showProgress = self._showProgress
        # Init data (using what the dicom package produces as a reference
        # for the dtype of the volume)
        ds = self._datasets[0]
        slice = _getPixelDataFromDataset( ds )
        #vol = Aarray(self.shape, self.sampling, fill=0, dtype=slice.dtype)
        vol = np.zeros(self.shape, dtype=slice.dtype)
        vol[0] = slice
        # Fill volume, one z-slice per dataset
        showProgress('Loading data:')
        ll = self.shape[0]
        for z in range(1,ll):
            ds = self._datasets[z]
            vol[z] = _getPixelDataFromDataset(ds)
            showProgress(float(z)/ll)
        # Finish
        showProgress(None)
        # Done
        gc.collect()
        return vol
    def _append(self, dcm):
        """ _append(dcm)
        Append a dicomfile (as a dicom.dataset.FileDataset) to the series.
        """
        self._datasets.append(dcm)
    def _sort(self):
        """ sort()
        Sort the datasets by instance number.
        """
        self._datasets.sort(key=lambda k: k.InstanceNumber)
    def _finish(self):
        """ _finish()
        Evaluate the series of dicom files. Together they should make up
        a volumetric dataset. This means the files should meet certain
        conditions. Also some additional information has to be calculated,
        such as the distance between the slices. This method sets the
        attributes for "shape", "sampling" and "info".
        This method checks:
          * that there are no missing files
          * that the dimensions of all images match
          * that the pixel spacing of all images match
        """
        # The datasets list should be sorted by instance number
        L = self._datasets
        if len(L)==0:
            return
        elif len(L) < 2:
            # Set attributes (single file: 2D shape/sampling, no distance)
            ds = self._datasets[0]
            self._info = self._datasets[0]
            self._shape = [ds.Rows, ds.Columns]
            self._sampling = [ds.PixelSpacing[0], ds.PixelSpacing[1]]
            return
        # Get previous
        ds1 = L[0]
        # Init measures to calculate average of
        distance_sum = 0.0
        # Init measures to check (these are in 2D)
        dimensions = ds1.Rows, ds1.Columns
        sampling = ds1.PixelSpacing[0], ds1.PixelSpacing[1] # row, column
        for index in range(len(L)):
            # The first round ds1 and ds2 will be the same, for the
            # distance calculation this does not matter
            # Get current
            ds2 = L[index]
            # Get positions (z-component of ImagePositionPatient)
            pos1 = ds1.ImagePositionPatient[2]
            pos2 = ds2.ImagePositionPatient[2]
            # Update distance_sum to calculate distance later
            distance_sum += abs(pos1 - pos2)
            # Test measures
            dimensions2 = ds2.Rows, ds2.Columns
            sampling2 = ds2.PixelSpacing[0], ds2.PixelSpacing[1]
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError('Dimensions of slices does not match.')
            if sampling != sampling2:
                # We can still produce a volume, but we should notify the user
                msg = 'Warning: sampling does not match.'
                if self._showProgress is _progressCallback:
                    _progressBar.PrintMessage(msg)
                else:
                    print msg
            # Store previous
            ds1 = ds2
        # Create new dataset by making a deep copy of the first,
        # excluding the (7FE0,0010) pixel data element
        info = dicom.dataset.Dataset()
        firstDs = self._datasets[0]
        for key in firstDs.keys():
            if key != (0x7fe0, 0x0010):
                el = firstDs[key]
                info.add_new(el.tag, el.VR, el.value)
        # Finish calculating average distance
        # (Note that there are len(L)-1 distances)
        distance_mean = distance_sum / (len(L)-1)
        # Store information that is specific for the serie
        self._shape = [len(L), ds2.Rows, ds2.Columns]
        self._sampling = [distance_mean, ds2.PixelSpacing[0],
            ds2.PixelSpacing[1]]
        # Store
        self._info = info
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print "Expected a single argument: a directory with dicom files in it"
else:
adir = sys.argv[1]
t0 = time.time()
all_series = read_files(adir, None, False)
print "Summary of each series:"
for series in all_series:
print series.description
| Python |
# __init__.py
# Mark the folder as a python package
| Python |
# filewriter.py
"""Write a dicom media file."""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import pack
import logging
logger = logging.getLogger('pydicom')
from dicom.UID import ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian
from dicom.filebase import DicomFile
from dicom.datadict import dictionaryVR
from dicom.dataset import Dataset
from dicom.dataelem import DataElement
from dicom.tag import Tag, ItemTag, ItemDelimiterTag, SequenceDelimiterTag
from dicom.sequence import Sequence
from dicom.valuerep import extra_length_VRs
def write_numbers(fp, data_element, struct_format):
    """Write a "value" of type struct_format from the dicom file.
    "Value" can be more than one number.
    struct_format -- the character format as used by the struct module.
    """
    # Endianness of the output is taken from the file object
    endianChar = '><'[fp.is_little_endian]
    value = data_element.value
    if value == "":
        return  # don't need to write anything for empty string
    format_string = endianChar + struct_format
    try:
        try:
            value.append  # works only if list, not if string or number
        # Narrowed from a bare 'except': only the absent attribute means
        # "single value"; other errors must reach the outer handler.
        except AttributeError:  # is a single value - the usual case
            fp.write(pack(format_string, value))
        else:
            for val in value:
                fp.write(pack(format_string, val))
    except Exception as e:
        # Fix: was 'data_elemesnt' (a NameError that masked the real error)
        raise IOError("{0}\nfor data_element:\n{1}".format(str(e), str(data_element)))
def write_OBvalue(fp, data_element):
    """Write the raw byte string of an 'other byte' (OB) data_element."""
    payload = data_element.value
    fp.write(payload)
def write_OWvalue(fp, data_element):
    """Write a data_element with VR of 'other word' (OW).
    Note: This **does not currently do the byte swapping** for Endian state.
    """
    # XXX the raw bytes are written as-is, with no endian swapping
    payload = data_element.value
    fp.write(payload)
def write_UI(fp, data_element):
    """Write a 'unique identifier' (UI) data_element, padding the value
    with a NUL byte when needed to reach an even length."""
    write_string(fp, data_element, '\0')
def multi_string(val):
    """Join a multi-valued string with the DICOM backslash delimiter;
    a single value is returned unchanged."""
    if not isinstance(val, (list, tuple)):
        return val
    return b"\\".join(val)  # \ is escape chr, so "\\" gives single backslash
def write_string(fp, data_element, padding=' '):
    """Write a single or multivalued string, padded with 'padding' to an
    even number of characters."""
    val = multi_string(data_element.value)
    if len(val) % 2:
        val = val + padding  # pad to even length
    fp.write(val)
def write_number_string(fp, data_element, padding=' '):
    """Handle IS or DS VR - write a number stored as a string of digits."""
    # Prefer the original_string attribute (kept by DS/IS values read from
    # file) so unchanged data elements round-trip with the exact string.
    val = data_element.value
    if isinstance(val, (list, tuple)):
        parts = (x.original_string if hasattr(x, 'original_string')
                 else str(x) for x in val)
        val = b"\\".join(parts)
    else:
        if hasattr(val, 'original_string'):
            val = val.original_string
        else:
            val = str(val)
    if len(val) % 2:
        val = val + padding  # pad to even length
    fp.write(val)
def write_data_element(fp, data_element):
    """Write the data_element to file fp according to dicom media storage rules."""
    # The tag is always written first
    fp.write_tag(data_element.tag)
    VR = data_element.VR
    if not fp.is_implicit_VR:
        if len(VR) != 2:
            # Ambiguous VRs like 'US or SS' cannot be written explicitly
            msg = "Cannot write ambiguous VR of '%s' for data element with tag %r." % (VR, data_element.tag)
            msg += "\nSet the correct VR before writing, or use an implicit VR transfer syntax"
            raise ValueError(msg)
        fp.write(VR)
        if VR in extra_length_VRs:
            fp.write_US(0)   # reserved 2 bytes
    if VR not in writers:
        raise NotImplementedError("write_data_element: unknown Value Representation '{0}'".format(VR))
    # Write a placeholder length now; the real length is seeked back to
    # and filled in after the value has been written.
    length_location = fp.tell()  # save location for later.
    if not fp.is_implicit_VR and VR not in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
        fp.write_US(0)  # Explicit VR length field is only 2 bytes
    else:
        fp.write_UL(0xFFFFFFFFL)  # will fill in real length value later if not undefined length item
    try:
        writers[VR][0]  # if writer is a tuple, then need to pass a number format
    except TypeError:
        writers[VR](fp, data_element)  # call the function to write that kind of item
    else:
        writers[VR][0](fp, data_element, writers[VR][1])
    #  print DataElement(tag, VR, value)
    is_undefined_length = False
    if hasattr(data_element, "is_undefined_length") and data_element.is_undefined_length:
        is_undefined_length = True
    # Go back and patch the length field, then return to the end
    location = fp.tell()
    fp.seek(length_location)
    if not fp.is_implicit_VR and VR not in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
        fp.write_US(location - length_location - 2)  # 2 is length of US
    else:
        # write the proper length of the data_element back in the length slot, unless is SQ with undefined length.
        if not is_undefined_length:
            fp.write_UL(location - length_location - 4)  # 4 is length of UL
    fp.seek(location)  # ready for next data_element
    if is_undefined_length:
        # Undefined-length items are closed with an explicit delimiter
        fp.write_tag(SequenceDelimiterTag)
        fp.write_UL(0)  # 4-byte 'length' of delimiter data item
def write_dataset(fp, dataset):
    """Write a Dataset dictionary to the file. Return the total length written."""
    start = fp.tell()
    # data_elements must be written in ascending tag order
    for tag in sorted(dataset.keys()):
        write_data_element(fp, dataset[tag])
    return fp.tell() - start
def write_sequence(fp, data_element):
    """Write a dicom Sequence contained in data_element to the file fp."""
    # write_data_element has already written the VR='SQ' (if needed) and
    # a placeholder for the length
    for item_dataset in data_element.value:
        write_sequence_item(fp, item_dataset)
def write_sequence_item(fp, dataset):
    """Write an item (dataset) in a dicom Sequence to the dicom file fp."""
    # see Dicom standard Part 5, p. 39 ('03 version)
    # This is similar to writing a data_element, but with a specific tag for Sequence Item
    fp.write_tag(ItemTag)  # marker for start of Sequence Item
    # Placeholder length; patched below once the item has been written
    length_location = fp.tell()  # save location for later.
    fp.write_UL(0xffffffffL)  # will fill in real value later if not undefined length
    write_dataset(fp, dataset)
    if getattr(dataset, "is_undefined_length_sequence_item", False):
        # Keep the 0xffffffff length and close with an explicit delimiter
        fp.write_tag(ItemDelimiterTag)
        fp.write_UL(0)  # 4-bytes 'length' field for delimiter item
    else:  # we will be nice and set the lengths for the reader of this file
        location = fp.tell()
        fp.seek(length_location)
        fp.write_UL(location - length_location - 4)  # 4 is length of UL
        fp.seek(location)  # ready for next data_element
def write_UN(fp, data_element):
    """Write the raw byte string of an 'unknown' (UN) data_element."""
    raw = data_element.value
    fp.write(raw)
def write_ATvalue(fp, data_element):
    """Write one or more data element tags (VR 'AT') to the file fp."""
    value = data_element.value
    try:
        # Multi-valued AT? Note: will fail if Tag ever derived from a
        # true tuple rather than being a long.
        iter(value)
    except TypeError:
        # Single tag: make sure it is expressed as a Tag instance
        fp.write_tag(Tag(value))
    else:
        for item in value:
            fp.write_tag(Tag(item))
def _write_file_meta_info(fp, meta_dataset):
    """Write the dicom group 2 dicom storage File Meta Information to the file.
    The file should already be positioned past the 128 byte preamble.
    Raises ValueError if the required data_elements (elements 2,3,0x10,0x12)
    are not in the dataset. If the dataset came from a file read with
    read_file(), then the required data_elements should already be there.
    """
    fp.write(b'DICM')
    # File meta info is always LittleEndian, Explicit VR. After will change these
    # to the transfer syntax values set in the meta info
    fp.is_little_endian = True
    fp.is_implicit_VR = False
    if Tag((2,1)) not in meta_dataset:
        meta_dataset.add_new((2,1), b'OB', b"\0\1") # file meta information version
    # Now check that required meta info tags are present:
    missing = []
    for element in [2, 3, 0x10, 0x12]:
        if Tag((2, element)) not in meta_dataset:
            missing.append(Tag((2, element)))
    if missing:
        raise ValueError("Missing required tags {0} for file meta information".format(str(missing)))
    # Put in temp number for required group length, save current location to come back
    meta_dataset[(2,0)] = DataElement((2,0), 'UL', 0) # put 0 to start
    group_length_data_element_size = 12 # !based on DICOM std ExplVR
    group_length_tell = fp.tell()
    # Write the file meta dataset, including temp group length
    length = write_dataset(fp, meta_dataset)
    # Group length counts the bytes AFTER the group length element itself
    group_length = length - group_length_data_element_size # counts from end of that
    # Save end of file meta to go back to
    end_of_file_meta = fp.tell()
    # Go back and write the actual group length
    fp.seek(group_length_tell)
    group_length_data_element = DataElement((2,0), 'UL', group_length)
    write_data_element(fp, group_length_data_element)
    # Return to end of file meta, ready to write remainder of the file
    fp.seek(end_of_file_meta)
def write_file(filename, dataset, WriteLikeOriginal=True):
    """Store a Dataset to the filename specified.
    Set dataset.preamble if you want something other than 128 0-bytes.
    If the dataset was read from an existing dicom file, then its preamble
    was stored at read time. It is up to you to ensure the preamble is still
    correct for its purposes.
    If there is no Transfer Syntax tag in the dataset,
    Set dataset.is_implicit_VR, and .is_little_endian
    to determine the transfer syntax used to write the file.
    WriteLikeOriginal -- True if want to preserve the following for each sequence
    within this dataset:
    - preamble -- if no preamble in read file, then not used here
    - dataset.hasFileMeta -- if writer did not do file meta information,
        then don't write here either
    - seq.is_undefined_length -- if original had delimiters, write them now too,
        instead of the more sensible length characters
    - <dataset>.is_undefined_length_sequence_item -- for datasets that belong to a
        sequence, write the undefined length delimiters if that is
        what the original had
    Set WriteLikeOriginal = False to produce a "nicer" DICOM file for other readers,
    where all lengths are explicit.
    """
    # Decide whether to write DICOM preamble. Should always do so unless trying to mimic the original file read in
    preamble = getattr(dataset, "preamble", None)
    if not preamble and not WriteLikeOriginal:
        preamble = b"\0"*128
    file_meta = dataset.file_meta
    if file_meta is None:
        file_meta = Dataset()
    # Derive the transfer syntax UID from the dataset's endian/VR flags
    # when it is not already present in the file meta information
    if 'TransferSyntaxUID' not in file_meta:
        if dataset.is_little_endian and dataset.is_implicit_VR:
            file_meta.add_new((2, 0x10), 'UI', ImplicitVRLittleEndian)
        elif dataset.is_little_endian and not dataset.is_implicit_VR:
            file_meta.add_new((2, 0x10), 'UI', ExplicitVRLittleEndian)
        elif not dataset.is_little_endian and not dataset.is_implicit_VR:
            file_meta.add_new((2, 0x10), 'UI', ExplicitVRBigEndian)
        else:
            raise NotImplementedError("pydicom has not been verified for Big Endian with Implicit VR")
    fp = DicomFile(filename,'wb')
    try:
        if preamble:
            fp.write(preamble) # blank 128 byte preamble
        _write_file_meta_info(fp, file_meta)
        # Set file VR, endian. MUST BE AFTER writing META INFO (which changes to Explict LittleEndian)
        fp.is_implicit_VR = dataset.is_implicit_VR
        fp.is_little_endian = dataset.is_little_endian
        write_dataset(fp, dataset)
    finally:
        fp.close()
# Map each VR to a function which can write it
# for write_numbers, the Writer maps to a tuple (function, struct_format)
# (struct_format is python's struct module format)
# Ambiguous VRs ('US or SS', 'OB/OW', ...) are written as raw bytes,
# since the correct interpretation depends on other elements.
writers = {'UL':(write_numbers,'L'), 'SL':(write_numbers,'l'),
           'US':(write_numbers,'H'), 'SS':(write_numbers, 'h'),
           'FL':(write_numbers,'f'), 'FD':(write_numbers, 'd'),
           'OF':(write_numbers,'f'),
           'OB':write_OBvalue, 'UI':write_UI,
           'SH':write_string,  'DA':write_string, 'TM': write_string,
           'CS':write_string,  'PN':write_string, 'LO': write_string,
           'IS':write_number_string,  'DS':write_number_string, 'AE': write_string,
           'AS':write_string,
           'LT':write_string,
           'SQ':write_sequence,
           'UN':write_UN,
           'AT':write_ATvalue,
           'ST':write_string,
           'OW':write_OWvalue,
           'US or SS':write_OWvalue,
           'OW/OB':write_OBvalue,
           'OB/OW':write_OBvalue,
           'OB or OW':write_OBvalue,
           'OW or OB':write_OBvalue,
           'DT':write_string,
           'UT':write_string,
           } # note OW/OB depends on other items, which we don't know at write time
| Python |
# config.py
"""Pydicom configuration options."""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# doc strings following items are picked up by sphinx for documentation
allow_DS_float = False
"""Set allow_float to True to allow DS instances to be created with floats;
otherwise, they must be explicitly converted to strings, with the user
explicity setting the precision of digits and rounding. Default: False"""
enforce_valid_values = True
"""Raise errors if any value is not allowed by DICOM standard, e.g. DS strings
that are longer than 16 characters; IS strings outside the allowed range.
"""
| Python |
# misc.py
"""Miscellaneous helper functions"""
# Copyright (c) 2009 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Multipliers for the recognized two-letter size suffixes.
_size_factors = dict(KB=1024, MB=1024*1024, GB=1024*1024*1024)
def size_in_bytes(expr):
    """Return the number of bytes for a defer_size argument to read_file()
    """
    try:
        return int(expr)
    except ValueError:
        # Not a plain integer: expect a number followed by KB/MB/GB
        unit = expr[-2:].upper()
        if unit not in _size_factors:
            raise ValueError("Unable to parse length with unit '{0:s}'".format(unit))
        return float(expr[:-2]) * _size_factors[unit]
| Python |
# dicomtree.py
"""Show a dicom file using a hierarchical tree in a graphical window"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
usage = "Usage: python dicomtree.py dicom_filename"
from dicom.valuerep import PersonNameUnicode
import Tix
def RunTree(w, filename):
    """Build the window layout (tree widget plus an Exit button box)
    inside the Tk toplevel 'w' and fill the tree with the contents of
    the dicom file 'filename'."""
    top = Tix.Frame(w, relief=Tix.RAISED, bd=1)
    tree = Tix.Tree(top, options="hlist.columns 2")
    tree.pack(expand=1, fill=Tix.BOTH, padx=10, pady=10, side=Tix.LEFT)
    # print(tree.hlist.keys()) # use to see the available configure() options
    tree.hlist.configure(bg='white', font='Courier 10', indent=30)
    tree.hlist.configure(selectbackground='light yellow', gap=150)
    box = Tix.ButtonBox(w, orientation=Tix.HORIZONTAL)
    # box.add('ok', text='Ok', underline=0, command=w.destroy, width=6)
    box.add('exit', text='Exit', underline=0, command=w.destroy, width=6)
    box.pack(side=Tix.BOTTOM, fill=Tix.X)
    top.pack(side=Tix.TOP, fill=Tix.BOTH, expand=1)
    # Populate the tree from the dicom file
    show_file(filename, tree)
def show_file(filename, tree):
    """Read the dicom file 'filename' and display its data elements in
    the given tree widget, rooted at a node labeled with the filename."""
    tree.hlist.add("root", text=filename)
    # Fix: read the file passed as a parameter; previously this read
    # sys.argv[1] regardless, which broke any caller supplying a
    # different path than the command-line argument.
    ds = dicom.read_file(filename)
    ds.decode()  # change strings to unicode
    recurse_tree(tree, ds, "root", False)
    tree.autosetmode()
def recurse_tree(tree, dataset, parent, hide=False):
    """Add every data element of 'dataset' to the tree under node
    'parent'; sequence items are added one level deeper, initially
    hidden (collapsed). Note: uses 'unicode', so Python 2 only."""
    # order the dicom tags
    for data_element in dataset:
        # Node id: parent path plus the element's object id (unique)
        node_id = parent + "." + hex(id(data_element))
        if isinstance(data_element.value, unicode):
            tree.hlist.add(node_id, text=unicode(data_element))
        else:
            tree.hlist.add(node_id, text=str(data_element))
        if hide:
            tree.hlist.hide_entry(node_id)
        if data_element.VR == "SQ": # a sequence
            # One sub-node per item, labeled "<name> <n>"
            for i, dataset in enumerate(data_element.value):
                item_id = node_id + "." + str(i+1)
                sq_item_description = data_element.name.replace(" Sequence", "") # XXX not i18n
                item_text = "{0:s} {1:d}".format(sq_item_description, i+1)
                tree.hlist.add(item_id, text=item_text)
                tree.hlist.hide_entry(item_id)
                recurse_tree(tree, dataset, item_id, hide=True)
if __name__ == '__main__':
    import sys
    import dicom
    if len(sys.argv) != 2:
        # Wrong number of arguments: show usage and exit with an error code
        print("Please supply a dicom file name:\n")
        print(usage)
        sys.exit(-1)
    root = Tix.Tk()
    # Fixed window geometry: 800x600 at the top-left corner of the screen
    root.geometry("{0:d}x{1:d}+{2:d}+{3:d}".format(800, 600, 0, 0))
    RunTree(root, sys.argv[1])
    root.mainloop()
| Python |
# DicomDiff.py
"""Show the difference between two dicom files.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
usage = """
Usage:
python DicomDiff.py file1 file2
Results printed in python difflib form - indicated by start of each line:
' ' blank means lines the same
'-' means in file1 but "removed" in file2
'+' means not in file1, but "added" in file2
('?' lines from difflib removed - no use here)
"""
import sys
import dicom
import difflib
# only used as a script
if len(sys.argv) != 3:
    print(usage)
    sys.exit()

datasets = (dicom.read_file(sys.argv[1]),
            dicom.read_file(sys.argv[2]))

# difflib's compare functions want lists of newline-terminated lines,
# so massage each dataset's string representation into that form.
rep = []
for dataset in datasets:
    rep.append([line + "\n" for line in str(dataset).split("\n")])

differ = difflib.Differ()
for line in differ.compare(rep[0], rep[1]):
    if line[0] != "?":  # difflib's '?' annotation lines are of no use here
        print(line)
| Python |
# ListBeams.py
"""Given an RTPLAN DICOM file, list basic info for the beams in it
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
import dicom
usage = """python ListBeams.py rtplan.dcm"""
def ListBeams(plan_dataset):
    """Return a string summarizing the RTPLAN beam information in the dataset"""
    header = "{name:^13s} {num:^8s} {gantry:^8s} {ssd:^11s}".format(
        name="Beam name", num="Number", gantry="Gantry", ssd="SSD (cm)")
    lines = [header]
    for beam in plan_dataset.BeamSequence:
        first_cp = beam.ControlPointSequence[0]
        # SourcetoSurfaceDistance is stored in mm; report in cm
        ssd_cm = float(first_cp.SourcetoSurfaceDistance / 10)
        lines.append("{b.BeamName:^13s} {b.BeamNumber:8d} "
                     "{gantry:8.1f} {ssd:8.1f}".format(
                         b=beam, gantry=first_cp.GantryAngle, ssd=ssd_cm))
    return "\n".join(lines)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print(usage)
sys.exit(-1)
rtplan = dicom.read_file(sys.argv[1])
print(ListBeams(rtplan))
| Python |
# DicomInfo.py
"""
Read a DICOM file and print some or all of its values.
Usage: python DicomInfo.py imagefile [-v]
-v (optional): Verbose mode, prints all DICOM data elements
Without the -v option, a few of the most common dicom file
data elements are printed: some info about the patient and about
the image.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
import sys
import dicom
# check command line arguments make sense
argc = len(sys.argv)
if argc < 2 or argc > 3:  # need a file name, plus optionally "-v"
    print(__doc__)
    sys.exit()

# read the file
filename = sys.argv[1]
dataset = dicom.read_file(filename)

# Verbose mode:
if argc == 3:
    if sys.argv[2] == "-v":  # user asked for all info
        print(dataset)
    else:  # unknown command argument
        print(__doc__)
        sys.exit()

# Normal mode: a short summary of the most common elements
print()
print("Filename.........:", filename)
print("Storage type.....:", dataset.SOPClassUID)
print()
pat_name = dataset.PatientName
display_name = pat_name.family_name + ", " + pat_name.given_name
print("Patient's name...:", display_name)
print("Patient id.......:", dataset.PatientID)
print("Modality.........:", dataset.Modality)
print("Study Date.......:", dataset.StudyDate)

if 'PixelData' in dataset:
    num_rows = int(dataset.Rows)
    num_cols = int(dataset.Columns)
    print("Image size.......: {rows:d} x {cols:d}, {size:d} bytes".format(
        rows=num_rows, cols=num_cols, size=len(dataset.PixelData)))
    if 'PixelSpacing' in dataset:
        print("Pixel spacing....:", dataset.PixelSpacing)

# use .get() if not sure the item exists, and want a default value if missing
print("Slice location...:", dataset.get('SliceLocation', "(missing)"))
| Python |
# show_charset_name.py
"""Very simple app to display unicode person names"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import Tkinter
from dicom.valuerep import PersonName, PersonNameUnicode
# Single-byte character set used for the first component group of each name
default_encoding = 'iso8859'

root = Tkinter.Tk()
# root.geometry("%dx%d%+d%+d" % (800, 600, 0, 0))

# Multi-component person names from the DICOM standard's character-set
# examples, each paired with the encodings needed to decode its
# ideographic/phonetic component groups.  The raw escape sequences
# (\033... ISO 2022 shifts, \x.. multi-byte codes) must stay byte-exact.
person_names = [
    PersonNameUnicode(
        """Yamada^Tarou=\033$B;3ED\033(B^\033$BB@O:\033(B=\033$B$d$^$@\033(B^\033$B$?$m$&\033(B""",
        [default_encoding, 'iso2022_jp']),  # DICOM standard 2008-PS3.5 H.3 p 98
    PersonNameUnicode(
        """Wang^XiaoDong=\xcd\xf5\x5e\xd0\xa1\xb6\xab=""",
        [default_encoding, 'GB18030']),  # DICOM standard 2008-PS3.5 J.3 p 105
    PersonNameUnicode(
        """Wang^XiaoDong=\xe7\x8e\x8b\x5e\xe5\xb0\x8f\xe6\x9d\xb1=""",
        [default_encoding, 'UTF-8']),  # DICOM standard 2008-PS3.5 J.1 p 104
    PersonNameUnicode(
        """Hong^Gildong=\033$)C\373\363^\033$)C\321\316\324\327=\033$)C\310\253^\033$)C\261\346\265\277""",
        [default_encoding, 'euc_kr']),  # DICOM standard 2008-PS3.5 I.2 p 101
]

# One Tk label per decoded name; Tk renders the unicode directly
for person_name in person_names:
    label = Tkinter.Label(text=person_name)
    label.pack()
root.mainloop()
| Python |
# write_new.py
"""Simple example of writing a DICOM file from scratch using pydicom.
This example does not produce a DICOM standards compliant file as written,
you will have to change UIDs to valid values and add all required DICOM data
elements
"""
# Copyright (c) 2010-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
import sys
import os.path
import dicom
from dicom.dataset import Dataset, FileDataset
import dicom.UID
if __name__ == "__main__":
print("---------------------------- ")
print("write_new.py example program")
print("----------------------------")
print("Demonstration of writing a DICOM file using pydicom")
print("NOTE: this is only a demo. Writing a DICOM standards compliant file")
print("would require official UIDs, and checking the DICOM standard to ensure")
print("that all required data elements were present.")
print()
if sys.platform.lower().startswith("win"):
filename = r"c:\temp\test.dcm"
filename2 = r"c:\temp\test-explBig.dcm"
else:
homedir = os.path.expanduser("~")
filename = os.path.join(homedir, "test.dcm")
filename2 = os.path.join(homedir, "test-explBig.dcm")
print("Setting file meta information...")
# Populate required values for file meta information
file_meta = Dataset()
file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2' # CT Image Storage
file_meta.MediaStorageSOPInstanceUID = "1.2.3" # !! Need valid UID here for real work
file_meta.ImplementationClassUID = "1.2.3.4" # !!! Need valid UIDs here
print("Setting dataset values...")
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
ds = FileDataset(filename, {}, file_meta=file_meta, preamble="\0"*128)
# Add the data elements -- not trying to set all required here. Check DICOM standard
ds.PatientName = "Test^Firstname"
ds.PatientID = "123456"
# Set the transfer syntax
ds.is_little_endian = True
ds.is_implicit_VR = True
print("Writing test file", filename)
ds.save_as(filename)
print("File saved.")
# Write as a different transfer syntax
ds.file_meta.TransferSyntaxUID = dicom.UID.ExplicitVRBigEndian #XXX shouldn't need this but pydicom 0.9.5 bug not recognizing transfer syntax
ds.is_little_endian = False
ds.is_implicit_VR = False
print("Writing test file as Big Endian Explicit VR", filename2)
ds.save_as(filename2)
| Python |
# anonymize.py
"""Read a dicom file (or directory of files), partially "anonymize" it (them),
by replacing Person names, patient id, optionally remove curves
and private tags, and write result to a new file (directory)
This is an example only; use only as a starting point.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
# Use at your own risk!!
# Many more items need to be addressed for proper de-identifying DICOM data.
# In particular, note that pixel data could have confidential data "burned in"
# Annex E of PS3.15-2011 DICOM standard document details what must be done to
# fully de-identify DICOM data
from __future__ import print_function
usage = """
Usage:
python anonymize.py dicomfile.dcm outputfile.dcm
OR
python anonymize.py originals_directory anonymized_directory
Note: Use at your own risk. Does not fully de-identify the DICOM data as per
the DICOM standard, e.g in Annex E of PS3.15-2011.
"""
import os, os.path
import dicom
def anonymize(filename, output_filename, new_person_name="anonymous",
              new_patient_id="id", remove_curves=True, remove_private_tags=True):
    """Replace data element values to partly anonymize a DICOM file.

    Note: completely anonymizing a DICOM file is very complicated; there
    are many things this example code does not address. USE AT YOUR OWN RISK.
    """
    def _blank_person_names(ds, elem):
        # dataset.walk() callback: overwrite every person-name (PN) element
        if elem.VR == "PN":
            elem.value = new_person_name

    def _delete_curves(ds, elem):
        # dataset.walk() callback: remove curve data (groups 0x5000-0x50FF)
        if elem.tag.group & 0xFF00 == 0x5000:
            del ds[elem.tag]

    # Load the current dicom file to 'anonymize'
    dataset = dicom.read_file(filename)

    # Remove patient name and any other person names
    dataset.walk(_blank_person_names)

    # Change ID
    dataset.PatientID = new_patient_id

    # Remove data elements (should only do so if DICOM type 3 optional)
    # Could also have done: del ds.OtherPatientIDs, etc.
    for keyword in ('OtherPatientIDs', 'OtherPatientIDsSequence'):
        if keyword in dataset:
            delattr(dataset, keyword)

    # Blank data elements that are type 2 (required, may be empty)
    for keyword in ('PatientBirthDate',):
        if keyword in dataset:
            dataset.data_element(keyword).value = ''

    # Remove private tags / curves if the function arguments say to
    if remove_private_tags:
        dataset.remove_private_tags()
    if remove_curves:
        dataset.walk(_delete_curves)

    # write the 'anonymized' DICOM out under the new filename
    dataset.save_as(output_filename)
# Can run as a script:
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 3:
        print(usage)
        sys.exit()
    src, dest = sys.argv[1], sys.argv[2]
    if os.path.isdir(src):
        # Directory mode: anonymize every plain file in src into dest
        if os.path.exists(dest):
            if not os.path.isdir(dest):
                raise IOError("Input is directory; output name exists but is not a directory")
        else:  # dest does not exist; create it.
            os.makedirs(dest)
        for fname in os.listdir(src):
            full_path = os.path.join(src, fname)
            if os.path.isdir(full_path):
                continue  # skip nested directories
            print(fname + "...", end='')
            anonymize(full_path, os.path.join(dest, fname))
            print("done\r")
    else:  # first arg not a directory, assume two files given
        anonymize(src, dest)
    print()
| Python |
# __init__.py
# Mark the folder as a python package
| Python |
# myprint.py
"""Example of printing a dataset in your own format"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
def myprint(dataset, indent=0):
    """Print every element of `dataset` in a custom format.

    Sequence (SQ) elements recurse with one extra indent level per item;
    elements in the "don't print" list are replaced by a placeholder and
    over-long values are truncated. Modelled after Dataset._pretty_str().
    """
    dont_print = ['Pixel Data', 'File Meta Information Version']
    pad = " " * indent
    child_pad = " " * (indent + 1)
    for elem in dataset:
        if elem.VR == "SQ":  # a sequence -- recurse into each item
            print(pad, elem.name)
            for item in elem.value:
                myprint(item, indent + 1)
                print(child_pad + "---------")
        elif elem.name in dont_print:
            print("""<item not printed -- in the "don't print" list>""")
        else:
            shown = repr(elem.value)
            if len(shown) > 50:  # keep long values readable
                shown = shown[:50] + "..."
            print("{0:s} {1:s} = {2:s}".format(pad, elem.name, shown))
if __name__ == "__main__":
import dicom
import sys
usage = """Usage: myprint filename"""
if len(sys.argv) != 2:
print(usage)
sys.exit()
ds = dicom.read_file(sys.argv[1])
myprint(ds)
| Python |
# filebase.py
"""Hold DicomFile class, which does basic I/O for a dicom file."""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import absolute_import
from dicom.tag import Tag
from struct import unpack, pack
from io import BytesIO
import logging
logger = logging.getLogger('pydicom')
class DicomIO(object):
    """File object which holds transfer syntax info and anything else we need.

    Subclasses (e.g. DicomFileLike) supply the actual I/O callables
    (``parent_read``, ``write``, ``seek``, ``tell``, ``close``).  Assigning
    ``is_little_endian`` rebinds the generic names ``read_US``, ``read_UL``,
    ``write_US``, ``write_UL`` and ``read_tag`` to the matching
    endian-specific helpers.
    """
    max_read_attempts = 3  # number of times to read if don't get requested bytes
    defer_size = None  # default

    def __init__(self, *args, **kwargs):
        self._implicit_VR = True  # start with this by default

    def __del__(self):
        # close() is supplied by subclasses; guard so that garbage-collecting
        # a bare DicomIO instance does not raise AttributeError.
        close = getattr(self, "close", None)
        if close is not None:
            close()

    def read_le_tag(self):
        """Read and return two unsigned shorts (little endian) from the file."""
        bytes_read = self.read(4)
        if len(bytes_read) < 4:
            raise EOFError  # needed for reading "next" tag when at end of file
        # BUG FIX: struct.unpack takes (format, buffer); the original call had
        # the arguments reversed and always raised an error.
        return unpack(b"<HH", bytes_read)

    def read_be_tag(self):
        """Read and return two unsigned shorts (big endian) from the file."""
        bytes_read = self.read(4)
        if len(bytes_read) < 4:
            raise EOFError  # needed for reading "next" tag when at end of file
        # BUG FIX: argument order corrected, as in read_le_tag
        return unpack(b">HH", bytes_read)

    def write_tag(self, tag):
        """Write a dicom tag (two unsigned shorts) to the file."""
        tag = Tag(tag)  # make sure is an instance of class, not just a tuple or int
        self.write_US(tag.group)
        self.write_US(tag.element)

    def read_leUS(self):
        """Return an unsigned short from the file with little endian byte order"""
        return unpack(b"<H", self.read(2))[0]

    def read_beUS(self):
        """Return an unsigned short from the file with big endian byte order"""
        return unpack(b">H", self.read(2))[0]

    def read_leUL(self):
        """Return an unsigned long read with little endian byte order"""
        return unpack(b"<L", self.read(4))[0]

    def read(self, length=None, need_exact_length=True):
        """Read `length` bytes, raising EOFError if fewer are available.

        If length is None, then read all bytes.
        """
        parent_read = self.parent_read  # super(DicomIO, self).read
        if length is None:
            return parent_read()  # get all of it
        bytes_read = parent_read(length)
        if len(bytes_read) < length and need_exact_length:
            # Didn't get all the desired bytes. Keep trying to get the rest.
            # If reading across network, might want to add a delay here
            attempts = 0
            while attempts < self.max_read_attempts and len(bytes_read) < length:
                bytes_read += parent_read(length - len(bytes_read))
                attempts += 1
            if len(bytes_read) < length:
                start_pos = self.tell() - len(bytes_read)
                msg = "Unexpected end of file. "
                msg += "Read {0} bytes of {1} expected starting at position 0x{2:x}".format(len(bytes_read), length, start_pos)
                raise EOFError(msg)
        return bytes_read

    def write_leUS(self, val):
        """Write an unsigned short with little endian byte order"""
        self.write(pack(b"<H", val))

    def write_leUL(self, val):
        """Write an unsigned long with little endian byte order"""
        self.write(pack(b"<L", val))

    def write_beUS(self, val):
        """Write an unsigned short with big endian byte order"""
        self.write(pack(b">H", val))

    def write_beUL(self, val):
        """Write an unsigned long with big endian byte order"""
        self.write(pack(b">L", val))

    write_US = write_leUS  # XXX should we default to this?
    write_UL = write_leUL  # XXX "

    def read_beUL(self):
        """Return an unsigned long read with big endian byte order"""
        return unpack(b">L", self.read(4))[0]

    # Set up properties is_little_endian and is_implicit_VR
    # Big/Little Endian changes functions to read unsigned short or long,
    # e.g. length fields, etc.
    @property
    def is_little_endian(self):
        return self._little_endian

    @is_little_endian.setter
    def is_little_endian(self, value):
        self._little_endian = value
        if value:  # Little Endian
            self.read_US = self.read_leUS
            self.read_UL = self.read_leUL
            self.write_US = self.write_leUS
            self.write_UL = self.write_leUL
            self.read_tag = self.read_le_tag
        else:  # Big Endian
            self.read_US = self.read_beUS
            self.read_UL = self.read_beUL
            self.write_US = self.write_beUS
            self.write_UL = self.write_beUL
            self.read_tag = self.read_be_tag

    @property
    def is_implicit_VR(self):
        return self._implicit_VR

    @is_implicit_VR.setter
    def is_implicit_VR(self, value):
        self._implicit_VR = value
class DicomFileLike(DicomIO):
    """Wrap any file-like object for DICOM reading/writing.

    Delegates read/seek/tell/close to the wrapped object; objects with no
    write() get a stub that raises IOError, and objects with no name get
    a placeholder.
    """
    def __init__(self, file_like_obj):
        wrapped = file_like_obj
        self.parent = wrapped
        self.parent_read = wrapped.read
        self.seek = wrapped.seek
        self.tell = wrapped.tell
        self.close = wrapped.close
        # not every file-like object is writable or has a name
        self.write = getattr(wrapped, "write", self.no_write)
        self.name = getattr(wrapped, 'name', '<no filename>')

    def no_write(self, bytes_read):
        """Used for file-like objects where no write is available"""
        raise IOError("This DicomFileLike object has no write() method")
def DicomFile(*args, **kwargs):
    """Open the named file (open() arguments) wrapped as a DicomFileLike."""
    fileobj = open(*args, **kwargs)
    return DicomFileLike(fileobj)
def DicomBytesIO(*args, **kwargs):
    """Return an in-memory BytesIO buffer wrapped as a DicomFileLike."""
    membuf = BytesIO(*args, **kwargs)
    return DicomFileLike(membuf)
| Python |
# UID.py
"""Dicom Unique identifiers"""
# Copyright (c) 2008 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os
import uuid
import time, datetime
from math import fabs
from _UID_dict import UID_dictionary
class InvalidUID(Exception):
    '''Raised when a DICOM UID is malformed.

    Example of invalid UID::

    >>> uid = '1.2.123.'
    '''
    def __init__(self, value):
        # keep the offending value available to handlers
        self.value = value

    def __str__(self):
        return repr(self.value)
class UID(str):
    """Subclass python string so have human-friendly UIDs

    Use like:
        uid = UID('1.2.840.10008.1.2.4.50')
    then
        uid.name, uid.type, uid.info, and uid.is_retired all return
        values from the UID_dictionary

    String representation (__str__) will be the name,
    __repr__ will be the full 1.2.840....
    """
    def __new__(cls, val):
        """Set up new instance of the class"""
        # Don't repeat if already a UID class -- then may get the name
        # that str(uid) gives rather than the dotted number
        if isinstance(val, UID):
            return val
        else:
            if isinstance(val, basestring):
                return super(UID, cls).__new__(cls, val.strip())
            else:
                raise TypeError("UID must be a string")

    def __init__(self, val):
        """Initialize the UID properties

        Sets name, type, info, is_retired, and is_transfer_syntax.
        If UID is a transfer syntax, also sets is_little_endian, is_implicit_VR,
        and is_deflated boolean values.
        """
        # Note normally use __new__ on subclassing an immutable, but here we just want
        # to do some pre-processing against the UID dictionary.
        # "My" string can never change (it is a python immutable), so is safe
        if self in UID_dictionary:
            self.name, self.type, self.info, retired = UID_dictionary[self]
            self.is_retired = bool(retired)
        else:
            # Unknown UID: fall back to the dotted-number string itself
            self.name = str.__str__(self)
            self.type, self.info, self.is_retired = (None, None, None)

        # If the UID represents a transfer syntax, store info about that syntax
        self.is_transfer_syntax = (self.type == "Transfer Syntax")
        if self.is_transfer_syntax:
            # Assume a transfer syntax, correct it as necessary
            self.is_implicit_VR = True
            self.is_little_endian = True
            self.is_deflated = False

            if val == '1.2.840.10008.1.2':  # implicit VR little endian
                pass
            elif val == '1.2.840.10008.1.2.1':  # ExplicitVRLittleEndian
                self.is_implicit_VR = False
            elif val == '1.2.840.10008.1.2.2':  # ExplicitVRBigEndian
                self.is_implicit_VR = False
                self.is_little_endian = False
            elif val == '1.2.840.10008.1.2.1.99':  # DeflatedExplicitVRLittleEndian:
                self.is_deflated = True
                self.is_implicit_VR = False
            else:
                # Any other syntax should be Explicit VR Little Endian,
                # e.g. all Encapsulated (JPEG etc) are ExplVR-LE by Standard PS 3.5-2008 A.4 (p63)
                self.is_implicit_VR = False

    def __str__(self):
        """Return the human-friendly name for this UID"""
        return self.name

    def __eq__(self, other):
        """Override string equality so either name or UID number match passes"""
        if str.__eq__(self, other) is True:  # 'is True' needed (issue 96)
            return True
        if str.__eq__(self.name, other) is True:  # 'is True' needed (issue 96)
            return True
        return False

    def is_valid(self):
        '''
        Raise an exception is the UID is invalid

        Usage example::
            >>> invalid_uid = dicom.UID.UID('1.2.345.')
            >>> invalid_uid.is_valid(invalid_uid)
            InvalidUID: 'Trailing dot at the end of the UID'
            >>> valid_uid = dicom.UID.UID('1.2.123')
        '''
        # NOTE(review): only the trailing-dot rule is enforced here; other
        # DICOM UID constraints (max length, numeric components) are not
        # checked -- confirm whether callers rely on that.
        if self[-1] == '.':
            raise InvalidUID('Trailing dot at the end of the UID')

    # For python 3, any override of __cmp__ or __eq__ immutable requires
    # explicit redirect of hash function to the parent class
    # See http://docs.python.org/dev/3.0/reference/datamodel.html#object.__hash__
    def __hash__(self):
        return super(UID, self).__hash__()
# Pre-built UID instances for the standard uncompressed transfer syntaxes
ExplicitVRLittleEndian = UID('1.2.840.10008.1.2.1')
ImplicitVRLittleEndian = UID('1.2.840.10008.1.2')
DeflatedExplicitVRLittleEndian = UID('1.2.840.10008.1.2.1.99')
ExplicitVRBigEndian = UID('1.2.840.10008.1.2.2')

# Transfer syntaxes whose pixel data is not encapsulated/compressed
NotCompressedPixelTransferSyntaxes = [ExplicitVRLittleEndian,
                                      ImplicitVRLittleEndian,
                                      DeflatedExplicitVRLittleEndian,
                                      ExplicitVRBigEndian]

# Many thanks to the Medical Connections for offering free valid UIDs (http://www.medicalconnections.co.uk/FreeUID.html)
# Their service was used to obtain the following root UID for pydicom:
pydicom_root_UID = '1.2.826.0.1.3680043.8.498.'
pydicom_UIDs = {
    pydicom_root_UID + '1': 'ImplementationClassUID',
}
def generate_uid(prefix=pydicom_root_UID, truncate=False):
    '''
    Generate a dicom unique identifier based on host id, process id and current
    time. The max length of the generated UID is 64 characters.

    If the given prefix is ``None``, the UID is generated following the method
    described on `David Clunie website
    <http://www.dclunie.com/medical-image-faq/html/part2.html#UID>`_

    Usage example::
        >>> dicom.UID.generate_uid()
        1.2.826.0.1.3680043.8.498.2913212949509824014974371514
        >>> dicom.UID.generate_uid(None)
        2.25.31215762025423160614120088028604965760

    This method is inspired from the work of `DCMTK
    <http://dicom.offis.de/dcmtk.php.en>`_.

    :param prefix: The site root UID. Default to pydicom root UID.
    :param truncate: If True, cut the generated UID at 64 characters.
    '''
    max_uid_len = 64
    if prefix is None:
        # UUID-derived UID under the "2.25" arc -- needs no registered root
        dicom_uid = '2.25.{0}'.format(uuid.uuid1().int)
    else:
        # Sample the clock once so second/microsecond are consistent
        # (the original called datetime.today() twice and could straddle
        # a second boundary).
        now = datetime.datetime.today()
        # abs() keeps the pid non-negative without the float round-trip the
        # original math.fabs()/long() pair introduced; all parts stay ints.
        uid_info = [uuid.getnode(),
                    abs(os.getpid()),
                    now.second,
                    now.microsecond]
        suffix = ''.join([str(part) for part in uid_info])
        dicom_uid = ''.join([prefix, suffix])
    if truncate:
        dicom_uid = dicom_uid[:max_uid_len]
    dicom_uid = UID(dicom_uid)
    # This will raise an exception if the UID is invalid
    dicom_uid.is_valid()
    return dicom_uid
| Python |
# filereader.py
"""Read a dicom media file"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import absolute_import
# Need zlib and io.BytesIO for deflate-compressed file
import os.path
import warnings
import zlib
from io import BytesIO
import logging
from dicom.tag import TupleTag
from dicom.dataelem import RawDataElement
from dicom.util.hexutil import bytes2hex
from dicom.valuerep import extra_length_VRs
from dicom.charset import default_encoding
from dicom import in_py3
logger = logging.getLogger('pydicom')
stat_available = True
try:
from os import stat
except:
stat_available = False
from os import SEEK_CUR
import dicom.UID # for Implicit/Explicit/Little/Big Endian transfer syntax UIDs
from dicom.filebase import DicomFile, DicomFileLike
from dicom.filebase import DicomIO, DicomBytesIO
from dicom.dataset import Dataset, FileDataset
from dicom.datadict import dictionaryVR
from dicom.dataelem import DataElement, DeferredDataElement
from dicom.tag import Tag, ItemTag, ItemDelimiterTag, SequenceDelimiterTag
from dicom.sequence import Sequence
from dicom.misc import size_in_bytes
from dicom.fileutil import absorb_delimiter_item, read_undefined_length_value
from dicom.fileutil import length_of_undefined_length
from struct import Struct, unpack
from sys import byteorder
sys_is_little_endian = (byteorder == 'little')
class InvalidDicomError(Exception):
    """Raised when a file does not appear to be a valid DICOM file.

    A valid DICOM file carries the four characters "DICM" at position 128
    (after the 128-byte preamble); this exception signals that the marker
    was not found. To read such a file anyway (e.g. a stream written with
    no header), use read_file(..., force=True).
    """
    def __init__(self, *args):
        # supply a sensible default message when none was given
        if not args:
            args = ('The specified file is not a valid DICOM file.',)
        super(InvalidDicomError, self).__init__(*args)
class DicomIter(object):
    """Iterator over DICOM data elements created from a file-like object
    """
    def __init__(self, fp, stop_when=None, force=False):
        """Read the preamble and meta info, prepare iterator for remainder

        fp -- an open DicomFileLike object, at start of file

        Adds flags to fp: Big/Little-endian and Implicit/Explicit VR
        """
        self.fp = fp
        self.stop_when = stop_when
        # preamble is None when the 128-byte preamble + "DICM" marker is absent
        self.preamble = preamble = read_preamble(fp, force)
        self.has_header = has_header = (preamble is not None)
        self.file_meta_info = Dataset()
        if has_header:
            self.file_meta_info = file_meta_info = _read_file_meta_info(fp)
            # The transfer syntax from the file meta group decides how the
            # rest of the stream is parsed (VR explicitness and endianness)
            transfer_syntax = file_meta_info.TransferSyntaxUID
            if transfer_syntax == dicom.UID.ExplicitVRLittleEndian:
                self._is_implicit_VR = False
                self._is_little_endian = True
            elif transfer_syntax == dicom.UID.ImplicitVRLittleEndian:
                self._is_implicit_VR = True
                self._is_little_endian = True
            elif transfer_syntax == dicom.UID.ExplicitVRBigEndian:
                self._is_implicit_VR = False
                self._is_little_endian = False
            elif transfer_syntax == dicom.UID.DeflatedExplicitVRLittleEndian:
                # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
                # following the file metadata was prepared the normal way,
                # then "deflate" compression applied.
                # All that is needed here is to decompress and then
                # use as normal in a file-like object
                zipped = fp.read()
                # -MAX_WBITS part is from comp.lang.python answer:
                # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
                unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
                fp = BytesIO(unzipped)  # a file-like object
                self.fp = fp  # point to new object
                self._is_implicit_VR = False
                self._is_little_endian = True
            else:
                # Any other syntax should be Explicit VR Little Endian,
                # e.g. all Encapsulated (JPEG etc) are ExplVR-LE
                # by Standard PS 3.5-2008 A.4 (p63)
                self._is_implicit_VR = False
                self._is_little_endian = True
        else:  # no header -- make assumptions
            fp.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
            self._is_little_endian = True
            self._is_implicit_VR = True

        impl_expl = ("Explicit", "Implicit")[self._is_implicit_VR]
        big_little = ("Big", "Little")[self._is_little_endian]
        logger.debug("Using {0:s} VR, {1:s} Endian transfer syntax".format(
            impl_expl, big_little))

    def __iter__(self):
        # Yield the file meta elements first (in tag order), then stream the
        # main dataset's raw elements lazily from the file
        tags = sorted(self.file_meta_info.keys())
        for tag in tags:
            yield self.file_meta_info[tag]

        for data_element in data_element_generator(self.fp,
                                                   self._is_implicit_VR,
                                                   self._is_little_endian,
                                                   stop_when=self.stop_when):
            yield data_element
def data_element_generator(fp, is_implicit_VR, is_little_endian,
                           stop_when=None, defer_size=None):
    """Create a generator to efficiently return the raw data elements

    Returns (VR, length, raw_bytes, value_tell, is_little_endian),
    where:
    VR -- None if implicit VR, otherwise the VR read from the file
    length -- the length as in the DICOM data element (could be
        DICOM "undefined length" 0xffffffffL),
    value_bytes -- the raw bytes from the DICOM file
        (not parsed into python types)
    is_little_endian -- True if transfer syntax is little endian; else False
    """
    # Summary of DICOM standard PS3.5-2008 chapter 7:
    # If Implicit VR, data element is:
    #    tag, 4-byte length, value.
    #       The 4-byte length can be FFFFFFFF (undefined length)*
    # If Explicit VR:
    #    if OB, OW, OF, SQ, UN, or UT:
    #       tag, VR, 2-bytes reserved (both zero), 4-byte length, value
    #       For all but UT, the length can be FFFFFFFF (undefined length)*
    #    else: (any other VR)
    #       tag, VR, (2 byte length), value
    # * for undefined length, a Sequence Delimitation Item marks the end
    #       of the Value Field.
    # Note, except for the special_VRs, both impl and expl VR use 8 bytes;
    #    the special VRs follow the 8 bytes with a 4-byte length

    # With a generator, state is stored, so we can break down
    #    into the individual cases, and not have to check them again for each
    #    data element
    if is_little_endian:
        endian_chr = "<"
    else:
        endian_chr = ">"
    if is_implicit_VR:
        element_struct = Struct(endian_chr + "HHL")
    else:  # Explicit VR
        # tag, VR, 2-byte length (or 0 if special VRs)
        element_struct = Struct(endian_chr + "HH2sH")
        extra_length_struct = Struct(endian_chr + "L")  # for special VRs
        extra_length_unpack = extra_length_struct.unpack  # for lookup speed

    # Make local variables so have faster lookup
    fp_read = fp.read
    fp_tell = fp.tell
    logger_debug = logger.debug
    debugging = dicom.debugging
    element_struct_unpack = element_struct.unpack

    # One loop iteration per data element in the stream
    while True:
        # Read tag, VR, length, get ready to read value
        bytes_read = fp_read(8)
        if len(bytes_read) < 8:
            raise StopIteration  # at end of file
        if debugging: debug_msg = "{0:08x}: {1}".format(fp.tell()-8,
                                                        bytes2hex(bytes_read))

        if is_implicit_VR:
            # must reset VR each time; could have set last iteration (e.g. SQ)
            VR = None
            group, elem, length = element_struct_unpack(bytes_read)
        else:  # explicit VR
            group, elem, VR, length = element_struct_unpack(bytes_read)
            if in_py3:
                VR = VR.decode(default_encoding)
            if VR in extra_length_VRs:
                # special VRs carry a 4-byte length after 2 reserved bytes
                bytes_read = fp_read(4)
                length = extra_length_unpack(bytes_read)[0]
                if debugging: debug_msg += " " + bytes2hex(bytes_read)
        if debugging:
            debug_msg = "%-47s (%04x, %04x)" % (debug_msg, group, elem)
            if not is_implicit_VR: debug_msg += " %s " % VR
            if length != 0xFFFFFFFFL:
                debug_msg += "Length: %d" % length
            else:
                debug_msg += "Length: Undefined length (FFFFFFFF)"
            logger_debug(debug_msg)

        # Positioned to read the value, but may not want to -- check stop_when
        value_tell = fp_tell()
        tag = TupleTag((group, elem))
        if stop_when is not None:
            # XXX VR may be None here!! Should stop_when just take tag?
            if stop_when(tag, VR, length):
                if debugging:
                    logger_debug("Reading ended by stop_when callback. "
                                 "Rewinding to start of data element.")
                rewind_length = 8
                if not is_implicit_VR and VR in extra_length_VRs:
                    rewind_length += 4
                fp.seek(value_tell-rewind_length)
                raise StopIteration

        # Reading the value
        # First case (most common): reading a value with a defined length
        if length != 0xFFFFFFFFL:
            if defer_size is not None and length > defer_size:
                # Flag as deferred by setting value to None, and skip bytes
                value = None
                logger_debug("Defer size exceeded."
                             "Skipping forward to next data element.")
                fp.seek(fp_tell()+length)
            else:
                value = fp_read(length)
                if debugging:
                    dotdot = "   "
                    if length > 12:
                        dotdot = "..."
                    logger_debug("%08x: %-34s %s %r %s" % (value_tell,
                                 bytes2hex(value[:12]), dotdot, value[:12], dotdot))
            yield RawDataElement(tag, VR, length, value, value_tell,
                                 is_implicit_VR, is_little_endian)

        # Second case: undefined length - must seek to delimiter,
        # unless is SQ type, in which case is easier to parse it, because
        # undefined length SQs and items of undefined lengths can be nested
        # and it would be error-prone to read to the correct outer delimiter
        else:
            # Try to look up type to see if is a SQ
            # if private tag, won't be able to look it up in dictionary,
            #   in which case just ignore it and read the bytes
            if VR is None:
                try:
                    VR = dictionaryVR(tag)
                except KeyError:
                    pass
            if VR == 'SQ':
                if debugging:
                    msg = "{0:08x}: Reading/parsing undefined length sequence"
                    logger_debug(msg.format(fp_tell()))
                seq = read_sequence(fp, is_implicit_VR,
                                    is_little_endian, length)
                yield DataElement(tag, VR, seq, value_tell,
                                  is_undefined_length=True)
            else:
                delimiter = SequenceDelimiterTag
                if debugging:
                    logger_debug("Reading undefined length data element")
                value = read_undefined_length_value(fp, is_little_endian,
                                                    delimiter, defer_size)
                yield RawDataElement(tag, VR, length, value, value_tell,
                                     is_implicit_VR, is_little_endian)
def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
                 stop_when=None, defer_size=None):
    """Return a Dataset instance containing the next dataset in the file.

    :param fp: an opened file object
    :param is_implicit_VR: True if file transfer syntax is implicit VR
    :param is_little_endian: True if file has little endian transfer syntax
    :param bytelength: None to read until end of file or ItemDelimiterTag,
        else a fixed number of bytes to read
    :param stop_when: optional call_back function which can terminate reading.
        See help for data_element_generator for details
    :param defer_size: optional size to avoid loading large elements in memory.
        See help for data_element_generator for details
    :returns: a Dataset instance
    """
    elements = dict()
    dataset_start = fp.tell()
    element_gen = data_element_generator(fp, is_implicit_VR, is_little_endian,
                                         stop_when, defer_size)
    try:
        while bytelength is None or fp.tell() - dataset_start < bytelength:
            element = next(element_gen)
            # An ItemDelimiterTag means this dataset was an item inside a
            # sequence -- end of this item, so stop reading here.
            if element.tag == (0xFFFE, 0xE00D):
                break
            elements[element.tag] = element
    except StopIteration:
        pass  # generator exhausted (end of file or stop_when fired)
    except EOFError as details:
        # XXX is this error visible enough to user code with just logging?
        logger.error(str(details) + " in file " +
                     getattr(fp, "name", "<no filename>"))
    except NotImplementedError as details:
        logger.error(details)
    # Return whatever was successfully read, even after a trapped error
    return Dataset(elements)
def read_sequence(fp, is_implicit_VR, is_little_endian, bytelength, offset=0):
    """Read and return a Sequence -- i.e. a list of Datasets.

    :param fp: an opened file object positioned at the start of the SQ value
    :param is_implicit_VR: True if file transfer syntax is implicit VR
    :param is_little_endian: True if file has little endian transfer syntax
    :param bytelength: length in bytes of the sequence value, or 0xffffffff
        for an undefined-length sequence (read items until a delimiter)
    :param offset: added to each item's recorded file position (file_tell)
    :returns: a Sequence instance, with is_undefined_length attribute set
    """
    seq = []  # use builtin list to start for speed, convert to Sequence at end
    is_undefined_length = False
    if bytelength != 0:  # SQ of length 0 possible (PS 3.5-2008 7.5.1a (p.40)
        # Fix: was the Python 2-only long literal 0xffffffffL, which is a
        # SyntaxError under Python 3; the plain literal has the same value.
        if bytelength == 0xffffffff:
            is_undefined_length = True
            bytelength = None
        fp_tell = fp.tell  # for speed in loop
        fpStart = fp_tell()
        while (not bytelength) or (fp_tell() - fpStart < bytelength):
            file_tell = fp.tell()
            dataset = read_sequence_item(fp, is_implicit_VR, is_little_endian)
            if dataset is None:  # None is returned if hit Sequence Delimiter
                break
            dataset.file_tell = file_tell + offset
            seq.append(dataset)
    seq = Sequence(seq)
    seq.is_undefined_length = is_undefined_length
    return seq
def read_sequence_item(fp, is_implicit_VR, is_little_endian):
    """Read and return a single sequence item, i.e. a Dataset.

    Returns None if the Sequence Delimiter item is found instead
    (i.e. there are no more items in the sequence).
    """
    if is_little_endian:
        tag_length_format = "<HHL"
    else:
        tag_length_format = ">HHL"
    try:
        bytes_read = fp.read(8)
        group, element, length = unpack(tag_length_format, bytes_read)
    # Fix: narrowed from a bare 'except:' which also swallowed
    # KeyboardInterrupt/SystemExit; a short read or struct.error lands here.
    except Exception:
        raise IOError("No tag to read at file position "
                      "{0:05x}".format(fp.tell()))
    tag = (group, element)
    if tag == SequenceDelimiterTag:  # No more items, time to stop reading
        logger.debug("{0:08x}: {1}".format(fp.tell() - 8, "End of Sequence"))
        if length != 0:
            logger.warning("Expected 0x00000000 after delimiter, found 0x%x,"
                           " at position 0x%x" % (length, fp.tell() - 4))
        return None
    if tag != ItemTag:
        logger.warning("Expected sequence item with tag %s at file position "
                       "0x%x" % (ItemTag, fp.tell() - 4))
    else:
        logger.debug("{0:08x}: {1} Found Item tag (start of item)".format(
            fp.tell() - 4, bytes2hex(bytes_read)))
    # Fix: was the Python 2-only long literal 0xFFFFFFFFL (SyntaxError in
    # py3).  Undefined item length: read dataset until ItemDelimiterTag.
    if length == 0xFFFFFFFF:
        ds = read_dataset(fp, is_implicit_VR, is_little_endian,
                          bytelength=None)
        ds.is_undefined_length_sequence_item = True
    else:
        ds = read_dataset(fp, is_implicit_VR, is_little_endian, length)
    logger.debug("%08x: Finished sequence item" % fp.tell())
    return ds
def not_group2(tag, VR, length):
    """stop_when callback: True once an element outside group 0002 is seen."""
    return tag.group != 2
def _read_file_meta_info(fp):
    """Return the file meta information (the group 0002 elements).

    fp must be positioned after the 128-byte preamble and 'DICM' marker.
    Returns a Dataset; leaves fp positioned at the start of the main dataset.
    """
    # File meta info always LittleEndian, Explicit VR. After will change these
    # to the transfer syntax values set in the meta info
    # Get group length data element, whose value is the length of the meta_info
    fp_save = fp.tell()  # in case need to rewind
    debugging = dicom.debugging
    if debugging: logger.debug("Try to read group length info...")
    # Explicit VR little endian layout: tag (2+2 bytes), VR (2 chars),
    # then a 16-bit length (extended below for extra-length VRs)
    bytes_read = fp.read(8)
    group, elem, VR, length = unpack("<HH2sH", bytes_read)
    if debugging:
        debug_msg = "{0:08x}: {1}".format(fp.tell()-8, bytes2hex(bytes_read))
    if in_py3:
        # unpack gives bytes for the "2s" field under Python 3
        VR = VR.decode(default_encoding)
    if VR in extra_length_VRs:
        # These VRs use 2 reserved bytes... actually a 32-bit length follows
        bytes_read = fp.read(4)
        length = unpack("<L", bytes_read)[0]
        if debugging: debug_msg += " " + bytes2hex(bytes_read)
    if debugging:
        debug_msg = "{0:<47s} ({1:04x}, {2:04x}) {3:2s} Length: {4:d}".format(
            debug_msg, group, elem, VR, length)
        logger.debug(debug_msg)
    # Store meta group length if it exists, then read until not group 2
    if group == 2 and elem == 0:
        # (0002,0000) File Meta Group Length: value is bytes remaining in group 2
        bytes_read = fp.read(length)
        if debugging: logger.debug("{0:08x}: {1}".format(fp.tell()-length,
                                   bytes2hex(bytes_read)))
        group_length = unpack("<L", bytes_read)[0]
        expected_ds_start = fp.tell() + group_length
        if debugging:
            msg = "value (group length) = {0:d}".format(group_length)
            msg += " regular dataset should start at {0:08x}".format(
                expected_ds_start)
            logger.debug(" "*10 + msg)
    else:
        expected_ds_start = None
        if debugging:
            logger.debug(" " * 10 + "(0002,0000) Group length not found.")
    # Changed in pydicom 0.9.7 -- don't trust the group length, just read
    # until no longer group 2 data elements. But check the length and
    # give a warning if group 2 ends at different location.
    # Rewind to read the first data element as part of the file_meta dataset
    if debugging:
        logger.debug("Rewinding and reading whole dataset "
                     "including this first data element")
    fp.seek(fp_save)
    # not_group2 stops the read at the first non-group-2 element
    file_meta = read_dataset(fp, is_implicit_VR=False,
                             is_little_endian=True, stop_when=not_group2)
    fp_now = fp.tell()
    if expected_ds_start and fp_now != expected_ds_start:
        logger.info("*** Group length for file meta dataset "
                    "did not match end of group 2 data ***")
    else:
        if debugging: logger.debug("--- End of file meta data found "
                                   "as expected ---------")
    return file_meta
def read_file_meta_info(filename):
    """Read and return the DICOM file meta information only.

    This function is meant to be used in user code, for quickly going through
    a series of files to find one which is referenced to a particular SOP,
    without having to read the entire files.

    :param filename: path of the DICOM file to read
    :returns: a Dataset containing the group 0002 (file meta) elements
    :raises InvalidDicomError: if the 'DICM' marker is missing
    """
    fp = DicomFile(filename, 'rb')
    try:
        read_preamble(fp, False)  # if no header, raise exception
        return _read_file_meta_info(fp)
    finally:
        # Fix: the file handle was previously never closed (leak on every call)
        fp.close()
def read_preamble(fp, force):
    """Read and return the 128-byte DICOM preamble, consuming 'DICM'.

    If the 'DICM' marker is absent: with force True, assume there is no
    preamble, rewind the file to the beginning and return None; with force
    False, raise InvalidDicomError.
    """
    logger.debug("Reading preamble...")
    preamble = fp.read(0x80)
    if dicom.debugging:
        sample = bytes2hex(preamble[:8]) + "..." + bytes2hex(preamble[-8:])
        logger.debug("{0:08x}: {1}".format(fp.tell()-0x80, sample))
    magic = fp.read(4)
    if magic == b"DICM":
        logger.debug("{0:08x}: 'DICM' marker found".format(fp.tell()-4))
        return preamble
    if force:
        logger.info("File is not a standard DICOM file; 'DICM' header is "
                    "missing. Assuming no header and continuing")
        fp.seek(0)
        return None
    raise InvalidDicomError("File is missing 'DICM' marker. "
                            "Use force=True to force reading")
def _at_pixel_data(tag, VR, length):
return tag == (0x7fe0, 0x0010)
def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
    """Parse a DICOM file until a condition is met.

    ``read_partial`` is normally not called directly. Use ``read_file``
    instead, unless you need to stop on some condition
    other than reaching pixel data.

    :arg fileobj: a file-like object. This function does not close it.
    :arg stop_when: a callable which takes tag, VR, length,
        and returns True or False.
        If stop_when returns True,
        read_data_element will raise StopIteration.
        If None (default), then the whole file is read.
    :arg defer_size: values longer than this are not read into memory
        until accessed (see data_element_generator).
    :arg force: True to continue even when the 'DICM' marker is absent.
    :returns: a FileDataset instance
    """
    # Read preamble -- raise an exception if missing and force=False
    preamble = read_preamble(fileobj, force)
    file_meta_dataset = Dataset()
    # Assume a transfer syntax, correct it as necessary
    is_implicit_VR = True
    is_little_endian = True
    if preamble:
        # A preamble implies group 2 file meta info carrying the real
        # transfer syntax; adjust the VR/endian flags to match it
        file_meta_dataset = _read_file_meta_info(fileobj)
        transfer_syntax = file_meta_dataset.TransferSyntaxUID
        if transfer_syntax == dicom.UID.ImplicitVRLittleEndian:
            pass
        elif transfer_syntax == dicom.UID.ExplicitVRLittleEndian:
            is_implicit_VR = False
        elif transfer_syntax == dicom.UID.ExplicitVRBigEndian:
            is_implicit_VR = False
            is_little_endian = False
        elif transfer_syntax == dicom.UID.DeflatedExplicitVRLittleEndian:
            # See PS3.6-2008 A.5 (p 71)
            # when written, the entire dataset following
            # the file metadata was prepared the normal way,
            # then "deflate" compression applied.
            # All that is needed here is to decompress and then
            # use as normal in a file-like object
            zipped = fileobj.read()
            # -MAX_WBITS part is from comp.lang.python answer:
            # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
            unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
            fileobj = BytesIO(unzipped)  # a file-like object
            is_implicit_VR = False
        else:
            # Any other syntax should be Explicit VR Little Endian,
            # e.g. all Encapsulated (JPEG etc) are ExplVR-LE
            # by Standard PS 3.5-2008 A.4 (p63)
            is_implicit_VR = False
    else:  # no header -- use the is_little_endian, implicit assumptions
        file_meta_dataset.TransferSyntaxUID = dicom.UID.ImplicitVRLittleEndian
    try:
        dataset = read_dataset(fileobj, is_implicit_VR, is_little_endian,
                               stop_when=stop_when, defer_size=defer_size)
    except EOFError as e:
        # NOTE(review): if an EOFError actually escapes read_dataset (which
        # normally traps it), 'dataset' would be unbound below -- confirm.
        pass  # error already logged in read_dataset
    return FileDataset(fileobj, dataset, preamble, file_meta_dataset,
                       is_implicit_VR, is_little_endian)
def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
    """Read and parse a DICOM file.

    :param fp: either a file-like object, or a string containing the file name.
        If a file-like object, the caller is responsible for closing it.
    :param defer_size: if a data element value is larger than defer_size,
        then the value is not read into memory until it is accessed in code.
        Specify an integer (bytes), or a string value with units:
        e.g. "512 KB", "2 MB".
        Default None means all elements read into memory.
    :param stop_before_pixels: Set True to stop before reading pixels
        (and anything after them).
        If False (default), the full file will be read and parsed.
    :param force: Set to True to force reading even if no header is found.
        If False, a dicom.filereader.InvalidDicomError is raised
        when the file is not valid DICOM.
    :returns: a FileDataset instance
    """
    # Open file if not already a file object
    caller_owns_file = True
    # Fix: 'basestring' does not exist under Python 3, which made any
    # string-filename call fail there; fall back to str.
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3
    if isinstance(fp, string_types):
        # caller provided a file name; we own the file handle
        caller_owns_file = False
        logger.debug("Reading file '{0}'".format(fp))
        fp = open(fp, 'rb')
    if dicom.debugging:
        logger.debug("\n" + "-"*80)
        logger.debug("Call to read_file()")
        msg = ("filename:'%s', defer_size='%s'"
               ", stop_before_pixels=%s, force=%s")
        # Fix: a file-like object (e.g. BytesIO) may have no .name attribute,
        # which previously crashed this debug path with AttributeError.
        logger.debug(msg % (getattr(fp, "name", "<no filename>"),
                            defer_size, stop_before_pixels, force))
        if caller_owns_file:
            logger.debug("Caller passed file object")
        else:
            logger.debug("Caller passed file name")
        logger.debug("-"*80)
    # Convert size to defer reading into bytes, and store in file object
    # if defer_size is not None:
    #     defer_size = size_in_bytes(defer_size)
    # fp.defer_size = defer_size
    # Iterate through all items and store them --include file meta if present
    stop_when = None
    if stop_before_pixels:
        stop_when = _at_pixel_data
    try:
        dataset = read_partial(fp, stop_when, defer_size=defer_size,
                               force=force)
    finally:
        # Only close the handle if this function opened it
        if not caller_owns_file:
            fp.close()
    # XXX need to store transfer syntax etc.
    return dataset
def data_element_offset_to_value(is_implicit_VR, VR):
    """Return number of bytes from start of data element to start of value."""
    if is_implicit_VR:
        return 8   # tag of 4 plus 4-byte length
    if VR in extra_length_VRs:
        return 12  # tag 4 + 2 VR + 2 reserved + 4 length
    return 8       # tag 4 + 2 VR + 2 length
def read_deferred_data_element(fileobj_type, filename, timestamp,
                               raw_data_elem):
    """Read the previously deferred value from the file into memory
    and return a raw data element.

    :param fileobj_type: class used to re-open the file (e.g. DicomFile)
    :param filename: path of the file the element was originally read from
    :param timestamp: st_mtime of the file when first read, or None to skip
        the modification-time check
    :param raw_data_elem: the deferred element (its value is still None)
    :returns: the re-read data element with its value populated
    :raises IOError: if no filename was stored or the file has disappeared
    :raises ValueError: if the re-read element's VR or tag differs
    """
    logger.debug("Reading deferred element %r" % str(raw_data_elem.tag))
    # If it wasn't read from a file, then return an error
    if filename is None:
        raise IOError("Deferred read -- original filename not stored. "
                      "Cannot re-open")
    # Check that the file is the same as when originally read
    if not os.path.exists(filename):
        raise IOError("Deferred read -- original file "
                      "{0:s} is missing".format(filename))
    if stat_available and (timestamp is not None):
        statinfo = stat(filename)
        if statinfo.st_mtime != timestamp:
            warnings.warn("Deferred read warning -- file modification time "
                          "has changed.")
    # Open the file, position to the right place
    fp = fileobj_type(filename, 'rb')
    # Fix: close the handle even if seeking/reading raises (was leaked)
    try:
        is_implicit_VR = raw_data_elem.is_implicit_VR
        is_little_endian = raw_data_elem.is_little_endian
        offset = data_element_offset_to_value(is_implicit_VR, raw_data_elem.VR)
        fp.seek(raw_data_elem.value_tell - offset)
        elem_gen = data_element_generator(fp, is_implicit_VR, is_little_endian,
                                          defer_size=None)
        # Read the data element and check matches what was stored before
        data_elem = next(elem_gen)
    finally:
        fp.close()
    if data_elem.VR != raw_data_elem.VR:
        raise ValueError("Deferred read VR {0:s} does not match "
                         "original {1:s}".format(data_elem.VR, raw_data_elem.VR))
    if data_elem.tag != raw_data_elem.tag:
        raise ValueError("Deferred read tag {0!r} does not match "
                         "original {1!r}".format(data_elem.tag, raw_data_elem.tag))
    # Everything is ok, now this object should act like usual DataElement
    return data_elem
| Python |
# dataelem.py
"""Define the DataElement class - elements within a dataset.
DataElements have a DICOM value representation VR, a value multiplicity VM,
and a value.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
from __future__ import absolute_import
import sys
from dicom import in_py3
import logging
logger = logging.getLogger('pydicom')
from dicom.datadict import dictionary_has_tag, dictionary_description
from dicom.datadict import private_dictionary_description, dictionaryVR
from dicom.tag import Tag
from dicom.UID import UID
from dicom.valuerep import IS, DS, PersonName
from decimal import Decimal
from collections import namedtuple
# os.stat is only available on Unix and Windows; record its availability so
# the deferred-read code can skip the modification-time check where missing.
stat_available = True
try:
    from os import stat
except ImportError:  # narrowed from a bare 'except:' which hid other errors
    stat_available = False
import os.path
from dicom.filebase import DicomFile
import warnings
# Helper functions:
def isMultiValue(value):
    """Return True if *value* is 'list-like' (iterable but not a string)."""
    if isString(value):
        return False  # strings iterate, but count as single values
    try:
        iter(value)
        return True
    except TypeError:
        return False
def isString(val):
    """Return True if *val* is a string (text or bytes) type."""
    if in_py3:
        return isinstance(val, (str, bytes))
    return isinstance(val, basestring)
def isStringOrStringList(val):
    """Return true if val consists only of strings. val may be a list/tuple."""
    if isMultiValue(val):
        return all(isString(item) for item in val)
    # single value - test for a string
    return isString(val)
_backslash = "\\" # double '\' because it is used as escape chr in Python
class DataElement(object):
    """Contain and manipulate a Dicom data element, having a tag, VR, VM and value.

    Most user code will not create data elements using this class directly,
    but rather through 'named tags' in Dataset objects.
    See the Dataset class for a description of how Datasets, Sequences,
    and DataElements work.

    Class Data
    ----------
    For string display (via __str__), the following are used:
    descripWidth -- maximum width of description field (default 35).
    maxBytesToDisplay -- longer data will display "array of # bytes" (default 16).
    showVR -- True (default) to include the dicom VR just before the value.
    """
    descripWidth = 35       # max width of the description column in __str__
    maxBytesToDisplay = 16  # OB/OW-style values longer than this show a summary
    showVR = 1              # truthy -> include the VR in __str__ output
    def __init__(self, tag, VR, value, file_value_tell=None,
                 is_undefined_length=False):
        """Create a data element instance.

        Most user code should instead use DICOM keywords, (formerly 'Named tags'
        in pydicom) to create data_elements, for which only the value is supplied,
        and the VR and tag are determined from the dicom dictionary.

        tag -- dicom (group, element) tag in any form accepted by Tag().
        VR -- dicom value representation (see DICOM standard part 6)
        value -- the value of the data element. One of the following:
            - a single string value
            - a number
            - a list or tuple with all strings or all numbers
            - a multi-value string with backslash separator
        file_value_tell -- used internally by Dataset, to store the write
            position for ReplaceDataElementValue method
        is_undefined_length -- used internally to store whether the length
            field in this data element was 0xFFFFFFFFL, i.e. "undefined length"
        """
        self.tag = Tag(tag)
        self.VR = VR  # Note!: you must set VR before setting value
        self.value = value
        self.file_tell = file_value_tell
        self.is_undefined_length = is_undefined_length
    @property
    def value(self):
        """The value (possibly multiple values) of this data_element"""
        return self._value
    @value.setter
    def value(self, val):
        """Set method for 'value' property"""
        # Check if is a string with multiple values separated by '\'
        # If so, turn them into a list of separate strings
        # (excluded VRs are binary/numeric/text types where '\' is data,
        # not a value separator)
        if isString(val) and self.VR not in \
            ['UT','ST','LT', 'FL','FD','AT','OB','OW','OF','SL','SQ','SS',
                'UL', 'OB/OW', 'OW/OB', 'OB or OW', 'OW or OB', 'UN'] and 'US' not in self.VR: # latter covers 'US or SS' etc
            if _backslash in val:
                val = val.split(_backslash)
        self._value = self._convert_value(val)
    @property
    def VM(self):
        """The number of values in the data_element's 'value'"""
        if isMultiValue(self.value):
            return len(self.value)
        else:
            return 1
    def _convert_value(self, val):
        """Convert Dicom string values if possible to e.g. numbers. Handle the case
        of multiple value data_elements"""
        if self.VR=='SQ': # a sequence - leave it alone
            from dicom.sequence import Sequence
            if isinstance(val,Sequence):
                return val
            else:
                return Sequence(val)
        # if the value is a list, convert each element
        try:
            val.append
        except AttributeError: # not a list
            return self._convert(val)
        else:
            returnvalue = []
            for subval in val:
                returnvalue.append(self._convert(subval))
            return returnvalue
    def _convert(self, val):
        """Take the value and convert to number, etc if possible"""
        if self.VR == 'IS':
            return IS(val)
        elif self.VR == 'DS':
            return DS(val)
        elif self.VR == "UI":
            return UID(val)
        # Later may need this for PersonName as for UI,
        # but needs more thought
        # elif self.VR == "PN":
        #    return PersonName(val)
        else: # is either a string or a type 2 optionally blank string
            return val # this means a "numeric" value could be empty string ""
    def __str__(self):
        """Return str representation of this data_element"""
        repVal = self.repval
        if self.showVR:
            s = "%s %-*s %s: %s" % (str(self.tag), self.descripWidth,
                self.description()[:self.descripWidth], self.VR, repVal)
        else:
            s = "%s %-*s %s" % (str(self.tag), self.descripWidth,
                self.description()[:self.descripWidth], repVal)
        return s
    @property
    def repval(self):
        """Return a str representation of the current value for use in __str__"""
        # Binary-ish VRs beyond maxBytesToDisplay are summarized, not dumped
        if (self.VR in ['OB', 'OW', 'OW/OB', 'OW or OB', 'OB or OW', 'US or SS or OW', 'US or SS']
                and len(self.value) > self.maxBytesToDisplay):
            repVal = "Array of %d bytes" % len(self.value)
        elif hasattr(self, 'original_string'): # for VR of IS or DS
            repVal = repr(self.original_string)
        elif isinstance(self.value, Decimal):
            repVal = repr(self.value)
        elif isinstance(self.value, UID):
            repVal = self.value.name
        else:
            repVal = repr(self.value) # will tolerate unicode too
        return repVal
    def __unicode__(self):
        """Return unicode representation of this data_element.

        Python 2 only ('unicode' name does not exist under Python 3).
        """
        if isinstance(self.value, unicode):
            # start with the string rep then replace the value part with the unicode
            strVal = str(self)
            uniVal = unicode(strVal.replace(self.repval, "")) + self.value
            return uniVal
        else:
            return unicode(str(self))
    def __getitem__(self, key):
        """Returns the item from my value's Sequence, if it is one."""
        try:
            return self.value[key]
        except TypeError:
            raise TypeError("DataElement value is unscriptable (not a Sequence)")
    @property
    def name(self):
        # Convenience alias for description()
        return self.description()
    def description(self):
        """Return the DICOM dictionary description for this dicom tag."""
        if dictionary_has_tag(self.tag):
            name = dictionary_description(self.tag)
        elif self.tag.is_private:
            name = "Private tag data" # default
            if hasattr(self, 'private_creator'):
                try:
                    # If have name from private dictionary, use it, but
                    # but put in square brackets so is differentiated,
                    # and clear that cannot access it by name
                    name = "[" + private_dictionary_description(self.tag, self.private_creator) + "]"
                except KeyError:
                    pass
            elif self.tag.elem >> 8 == 0:
                # Private creator elements have element numbers 0x0010-0x00ff
                name = "Private Creator"
        elif self.tag.element == 0: # implied Group Length dicom versions < 3
            name = "Group Length"
        else:
            name = ""
        return name
    def __repr__(self):
        """Handle repr(data_element)"""
        if self.VR == "SQ":
            return repr(self.value)
        else:
            return str(self)
class DeferredDataElement(DataElement):
    """Subclass of DataElement where value is not read into memory until needed"""
    def __init__(self, tag, VR, fp, file_mtime, data_element_tell, length):
        """Store basic info for the data element but value will be read later

        fp -- DicomFile object representing the dicom file being read
        file_mtime -- last modification time on file, used to make sure
            it has not changed since original read
        data_element_tell -- file position at start of data element,
            (not the start of the value part, but start of whole element)
        """
        self.tag = Tag(tag)
        self.VR = VR
        self._value = None # flag as unread
        # Check current file object and save info needed for read later
        self.fp_is_implicit_VR = fp.is_implicit_VR
        self.fp_is_little_endian = fp.is_little_endian
        self.filepath = fp.name
        self.file_mtime = file_mtime
        self.data_element_tell = data_element_tell
        self.length = length  # byte length of the not-yet-read value
    @property
    def repval(self):
        # Avoid triggering the deferred file read just to build display text
        if self._value is None:
            return "Deferred read: length %d" % self.length
        else:
            return DataElement.repval.fget(self)
    @property
    def value(self):
        """Get method for 'value' property"""
        # Must now read the value if haven't already
        # NOTE(review): read_value() is not defined in this class as shown
        # here -- presumably provided elsewhere; confirm before relying on it.
        if self._value is None:
            self.read_value()
        return DataElement.value.fget(self)
    @value.setter
    def value(self, val):
        # Delegate to the base-class setter (keeps its string-split logic)
        DataElement.value.fset(self, val)
# Lightweight, immutable record of a data element exactly as read from the
# file (value still raw bytes); converted on demand by DataElement_from_raw().
RawDataElement = namedtuple('RawDataElement',
                'tag VR length value value_tell is_implicit_VR is_little_endian')
def DataElement_from_raw(raw_data_element):
    """Return a DataElement built from a RawDataElement."""
    # Buried here to avoid circular import
    # filereader->Dataset->convert_value->filereader (for SQ parsing)
    from dicom.values import convert_value
    raw = raw_data_element
    vr = raw.VR
    if vr is None:  # Can be if was implicit VR
        try:
            vr = dictionaryVR(raw.tag)
        except KeyError:
            if raw.tag.is_private:
                # private tag: not in the dictionary, keep the raw bytes
                vr = 'OB'
            elif raw.tag.element == 0:
                # group length tag implied in versions < 3.0
                vr = 'UL'
            else:
                raise KeyError("Unknown DICOM tag {0:s} - can't look up VR".format(str(raw.tag)))
    try:
        converted = convert_value(vr, raw)
    except NotImplementedError as e:
        raise NotImplementedError("{0:s} in tag {1!r}".format(str(e), raw.tag))
    return DataElement(raw.tag, vr, converted, raw.value_tell,
                       raw.length == 0xFFFFFFFF)
| Python |
# __init__.py for Dicom package
"""pydicom package -- easily handle DICOM files. See Quick Start below.
Copyright (c) 2008-2012 Darcy Mason
This file is part of pydicom, released under a modified MIT license.
See the file license.txt included with this distribution, also
available at http://pydicom.googlecode.com
-----------
Quick Start
-----------
1. A simple program to read a dicom file, modify a value, and write to a new file::
import dicom
dataset = dicom.read_file("file1.dcm")
dataset.PatientName = 'anonymous'
dataset.save_as("file2.dcm")
2. See the files in the examples directory that came with this package for more
examples, including some interactive sessions.
3. Learn the methods of the Dataset class; that is the one you will
work with most directly.
4. Questions/comments etc can be directed to the pydicom google group at
http://groups.google.com/group/pydicom
"""
import sys
# Hard floor on the interpreter version; older pythons lack features used here
if sys.version_info < (2,6,0):
    raise ImportError("pydicom > 0.9.7 requires python 2.6 or later")
in_py3 = sys.version_info[0] > 2  # flag used throughout the package for 2/3 differences
# Set up logging system for the whole package.
# In each module, set logger=logging.getLogger('pydicom') and the same instance
# will be used by all
# At command line, turn on debugging for all pydicom functions with:
# import dicom
# dicom.debug()
# Turn off debugging with
# dicom.debug(False)
import logging
def debug(debug_on=True):
    """Turn debugging of DICOM file reading and writing on or off.

    When debugging is on, file location and details about the elements read at
    that location are logged to the 'pydicom' logger using python's logging module.

    :param debug_on: True (default) to turn on debugging, False to turn off.
    """
    global logger, debugging
    level = logging.DEBUG if debug_on else logging.WARNING
    logger.setLevel(level)
    debugging = bool(debug_on)
# One shared 'pydicom' logger for the whole package; every module fetches the
# same instance via logging.getLogger('pydicom')
logger = logging.getLogger('pydicom')
handler = logging.StreamHandler()
# A timestamped format is available if wanted:
# formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s", "%Y-%m-%d %H:%M")
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
debug(False) # force level=WARNING, in case logging default is set differently (issue 102)
# For convenience, import the read_file and write_file functions (most used)
# into the "dicom" namespace.
from dicom.filereader import read_file
from dicom.filewriter import write_file
__version__ = "1.0a"
__version_info__ = (1,0,0)
| Python |
# charlist.py
"""List summary info for the test files in the charset directory"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import logging
# Plain INFO-level output: this script's report is emitted via logging.info
logging.basicConfig(level=logging.INFO,
                    format='%(message)s')
if __name__ == "__main__":
    from glob import glob
    import dicom
    # Collect summary information from every DICOM file in this directory
    dicom_names = glob("*.dcm")
    files_info = []
    for dicom_name in dicom_names:
        dataset = dicom.read_file(dicom_name)
        dataset.decode()
        files_info.append(
            (dicom_name, dataset.SpecificCharacterSet, dataset.PatientsName))
    # Report the summary as an aligned table
    # (row_fmt renamed from 'format', which shadowed the builtin)
    row_fmt = "%-16s %-40s %s"
    logging.info(row_fmt % ("Filename", "Character Sets", "Patient's Name"))
    logging.info(row_fmt % ("--------", "--------------", "--------------"))
    for file_info in files_info:
        logging.info(row_fmt % file_info)
    if "chrFrenMulti.dcm" in dicom_names:
        logging.info("\nOther\n=====")
        logging.info(
            "chrFrenMulti.dcm is a modified version of chrFren.dcm"
            " with multi-valued PN and LO for testing decoding"
        )
| Python |
# sequence.py
"""Hold the Sequence class, which stores a dicom sequence (list of Datasets)"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from dicom.dataset import Dataset
from dicom.multival import MultiValue
def validate_dataset(elem):
    """Ensure *elem* is a Dataset instance and return it unchanged."""
    if isinstance(elem, Dataset):
        return elem
    raise TypeError('Sequence contents must be a Dataset instance')
class Sequence(MultiValue):
    """A list of Datasets, as stored in a DICOM SQ data element.

    Derived from MultiValue; every item placed in the list is checked (via
    validate_dataset, installed as the MultiValue type_constructor) to be a
    Dataset instance.
    """
    def __init__(self, iterable=None):
        """Initialize a list of Datasets.

        :param iterable: an iterable (e.g. list, tuple) of Datasets. If no
            value is provided, an empty Sequence is generated
        """
        # A bare Dataset is itself iterable, which would otherwise produce
        # the less helpful "contents must be a Dataset" error; reject it
        # explicitly so the user learns the Dataset must be INSIDE an
        # iterable object.
        if isinstance(iterable, Dataset):
            raise TypeError('The Sequence constructor requires an iterable')
        # No input (or an empty one) yields an empty Sequence
        if not iterable:
            iterable = list()
        # validate_dataset is used as a pseudo type_constructor
        super(Sequence, self).__init__(validate_dataset, iterable)
    def __str__(self):
        """Bracketed concatenation of the str() of each contained Dataset."""
        return "[" + "".join(str(item) for item in self) + "]"
    def __repr__(self):
        """Sequence-specific string representation"""
        return "<%s, length %d, at %X>" % (
            self.__class__.__name__, len(self), id(self))
| Python |
# dump.py
"""Utility functions used in debugging writing and reading"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
from io import BytesIO
def print_character(ordchr):
    """Map a byte value to its printable character, or '.' if non-printable.

    Backslash (92) is also mapped to '.' to keep dump rows unambiguous.
    """
    is_printable = 31 < ordchr < 126 and ordchr != 92
    return chr(ordchr) if is_printable else '.'
def filedump(filename, start_address=0, stop_address=None):
    """Dump out the contents of a file to a standard hex dump 16 bytes wide.

    :param filename: path of the file to dump
    :param start_address: offset at which to start
    :param stop_address: offset after which to stop (None = to end of file)
    :returns: the dump as a string (see hexdump)
    """
    # Fix: 'file()' is a Python 2-only builtin, and the handle was never
    # closed; open() + 'with' works on both versions and releases the handle.
    with open(filename, 'rb') as fp:
        return hexdump(fp, start_address, stop_address)
def datadump(data):
    """Print a hex dump of an in-memory byte string to stdout."""
    buffer_fp = BytesIO(data)
    print(hexdump(buffer_fp, 0, len(data) + 1))
def hexdump(file_in, start_address=0, stop_address=None, showAddress=True):
    """Return a formatted string of hex bytes and characters in data.

    This is a utility function for debugging file writing.

    :param file_in: a file-like object to get the bytes to show from
    :param start_address: offset at which to start dumping
    :param stop_address: offset after which to stop (None = to end of file)
    :param showAddress: True to prefix each 16-byte row with its offset
    :returns: the dump as a str
    """
    lines = []
    byteslen = 16*3-1  # space taken up if row has a full 16 bytes
    blanks = ' ' * byteslen
    file_in.seek(start_address)
    while True:
        if stop_address and file_in.tell() > stop_address:
            break
        row_address = file_in.tell()
        data = file_in.read(16)
        if not data:
            # Fix: previously the address was written before the read, so a
            # dangling address fragment was emitted for the empty final row.
            break
        # Fix: bytearray yields ints for both py2 str and py3 bytes, replacing
        # the py2-only ord() loop; building a str list (joined below) replaces
        # writing text into a BytesIO, which is a TypeError under py3.
        row = bytearray(data)
        byte_string = ' '.join("%02x" % b for b in row)
        line = ""
        if showAddress:
            line += "%04x : " % row_address  # address at start of line
        line += byte_string
        line += blanks[:byteslen - len(byte_string)]  # if not 16, pad
        line += ' '
        line += ''.join(print_character(b) for b in row)  # character rep
        lines.append(line + "\n")
    return "".join(lines)
def pretty_print(ds, indent=0, indent_chars="   "):
    """Print a dataset directly, with indented levels.

    This is just like Dataset._pretty_str, but more useful for debugging as it
    prints each item immediately rather than composing a string, making it
    easier to immediately see where an error in processing a dataset starts.

    :param ds: the dataset (iterable of data elements) to print
    :param indent: current nesting level (used by the recursive calls)
    :param indent_chars: string repeated per level of indentation
    """
    # (removed an unused 'strings = []' local left over from _pretty_str)
    indentStr = indent_chars * indent
    nextIndentStr = indent_chars * (indent + 1)
    for data_element in ds:
        if data_element.VR == "SQ":  # a sequence: recurse into each item
            fmt_str = "{0:s}{1:s} {2:s} {3:d} item(s) ---"
            new_str = fmt_str.format(indentStr, str(data_element.tag),
                                     data_element.name, len(data_element.value))
            print(new_str)
            for dataset in data_element.value:
                # Fix: indent_chars is now propagated, so a caller-supplied
                # indent string applies to nested levels too
                pretty_print(dataset, indent + 1, indent_chars)
                print(nextIndentStr + "---------")
        else:
            print(indentStr + repr(data_element))
if __name__ == "__main__":
    import sys
    filename = sys.argv[1]
    start_address = 0
    stop_address = None
    # Fix: int(x, 0) parses decimal or 0x-prefixed hex addresses, replacing
    # eval(), which executed arbitrary expressions from the command line.
    if len(sys.argv) > 2:  # then have start address
        start_address = int(sys.argv[2], 0)
    if len(sys.argv) > 3:
        stop_address = int(sys.argv[3], 0)
    print(filedump(filename, start_address, stop_address))
| Python |
# hexutil.py
"""Miscellaneous utility routines relating to hex and byte strings"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from binascii import a2b_hex, b2a_hex
from dicom import in_py3
def hex2bytes(hexstring):
    """Return bytestring for a string of hex bytes separated by whitespace

    This is useful for creating specific byte sequences for testing, using
    python's implied concatenation for strings with comments allowed.

    Example:
        hex_string = (
            "08 00 32 10"    # (0008, 1032) SQ "Procedure Code Sequence"
            " 08 00 00 00"   # length 8
            " fe ff 00 e0"   # (fffe, e000) Item Tag
        )
        byte_string = hex2bytes(hex_string)

    Note in the example that all lines except the first must start with a space,
    alternatively the space could end the previous line.
    """
    # Strip ALL whitespace (spaces, tabs, newlines); the old
    # replace(" ", "") choked on multi-line strings despite the
    # docstring's promise of "separated by whitespace".
    return a2b_hex("".join(hexstring.split()))
def bytes2hex(byte_string):
    """Return a display string of hex digits for *byte_string*, two per byte."""
    s = b2a_hex(byte_string)
    # On py3 b2a_hex returns bytes, on py2 it returns str; this isinstance
    # check replaces the dependency on the dicom.in_py3 version flag with
    # the same effective behavior on both versions.
    if not isinstance(s, str):
        s = s.decode()
    return " ".join(s[i:i + 2] for i in range(0, len(s), 2))
| Python |
# __init__.py
| Python |
# multival.py
"""Code for multi-value data elements values, or any list of items that
must all be the same type.
"""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under an MIT-style license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
#
class MultiValue(list):
    """Class to hold any multi-valued DICOM value, or any list of items
    that are all of the same type.

    Every item added to the list is passed through the type constructor, so
    the constructor must accept an instance of its own type unchanged and
    should raise TypeError when a value cannot be converted.
    """

    def __init__(self, type_constructor, iterable):
        """Initialize the list of values

        :param type_constructor: a class or factory callable producing the
                          required type for every list item. For DICOM
                          multi-value data elements this corresponds to the VR.
        :param iterable: an iterable (e.g. list, tuple) of initial items
        """
        self.type_constructor = type_constructor
        converted = [type_constructor(item) for item in iterable]
        super(MultiValue, self).__init__(converted)

    def append(self, val):
        # coerce, then delegate to list
        super(MultiValue, self).append(self.type_constructor(val))

    def extend(self, list_of_vals):
        coerced = (self.type_constructor(item) for item in list_of_vals)
        super(MultiValue, self).extend(coerced)

    def insert(self, position, val):
        super(MultiValue, self).insert(position, self.type_constructor(val))

    def __setitem__(self, i, val):
        """Set an item of the list, making sure it is of the right VR type"""
        if isinstance(i, slice):
            coerced = [self.type_constructor(item) for item in val]
        else:
            coerced = self.type_constructor(val)
        super(MultiValue, self).__setitem__(i, coerced)

    def __str__(self):
        return "[" + ", ".join(str(item) for item in self) + "]"

    __repr__ = __str__
| Python |
# version_dep.py
"""Holds test code that is dependent on certain python versions"""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import warnings
def capture_warnings(function, *func_args, **func_kwargs):
    """Call *function* and record any warnings it raises.

    Returns a tuple of (function result, list of warning message strings).
    """
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")  # record even previously-seen warnings
        result = function(*func_args, **func_kwargs)
    messages = [str(item.message) for item in caught]
    return result, messages
| Python |
# time_test.py
"""Try reading large sets of files, profiling how much time it takes"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os.path
import os
import sys
# True on any Windows platform; selects the local paths below
on_windows = sys.platform.startswith("win")

# EDIT THIS SECTION --------------------------
# to point to local temp directory, and to a set of >400 DICOM files of same size to work on
# I used images freely available from http://pcir.org
if on_windows:
    tempfile = "c:/temp/pydicom_stats"        # cProfile stats dump file
    location_base = r"z:/testdicom/"          # root dir holding the test images
else:
    tempfile = "/tmp/pydicom_stats"
    location_base = r"/Users/darcy/testdicom/"
    # location_base = r"/Volumes/Disk 1/testdicom/"  # Network disk location
# image directories, relative to location_base
locations = ["77654033_19950903/77654033/19950903/CT2/",
             "98890234_20010101/98890234/20010101/CT5/",
             "98890234_20010101/98890234/20010101/CT6/",
             "98890234_20010101/98890234/20010101/CT7/",
             ]
locations = [os.path.join(location_base, location) for location in locations]
# -------------------------------------------------------
import glob
import dicom
from dicom.filereader import read_partial, _at_pixel_data
from io import BytesIO
from time import time
import cProfile
import pstats
import sys
import random
rp = read_partial  # local alias

# Gather candidate filenames from every test location
filenames = []
for location in locations:
    loc_list = glob.glob(os.path.join(location, "*"))
    # NOTE(review): x here is a full path, so this hidden-file filter
    # likely never matches; os.path.basename(x) was probably intended -- confirm
    filenames.extend((x for x in loc_list if not x.startswith(".")))
assert len(filenames) >= 400, "Need at least 400 files"  # unless change slices below
print
random.shuffle(filenames)  # to make sure no bias for any particular file
print "Sampling from %d files" % len(filenames), ". Each test gets 100 distinct files"
print "Test order is randomized too..."
# Give each test it's own set of files, to avoid reading something in cache from previous test
filenames1 = filenames[:100]  # keep the time to a reasonable amount (~2-25 sec)
filenames2 = filenames[100:200]
filenames3 = filenames[200:300]
filenames4 = filenames[300:400]
def test_full_read():
    """Fully parse every file in filenames1; return the list of datasets."""
    return [dicom.read_file(fn) for fn in filenames1]
def test_partial():
    """Parse each file in filenames2 only up to the pixel data element."""
    datasets = []
    for fn in filenames2:
        # close each file handle explicitly; the original leaked one per file
        with open(fn, 'rb') as f:
            datasets.append(read_partial(f, stop_when=_at_pixel_data))
def test_mem_read_full():
    """Slurp each file in filenames3 into memory, then parse from BytesIO."""
    rf = dicom.read_file
    datasets = []
    for fn in filenames3:
        # close the handle promptly; the original relied on GC to do it
        with open(fn, 'rb') as f:
            blob = f.read()
        datasets.append(rf(BytesIO(blob)))
def test_mem_read_small():
    """Read only the first 4000 bytes of each file in filenames4, then parse."""
    rf = dicom.read_file
    datasets = []
    for fn in filenames4:
        # close the handle promptly; the original relied on GC to do it
        with open(fn, 'rb') as f:
            head = f.read(4000)
        datasets.append(rf(BytesIO(head)))
def test_python_read_files():
    """Baseline: raw python reads of filenames4, no DICOM parsing at all."""
    all_files = []
    for fn in filenames4:
        # close each handle explicitly; the original leaked one per file
        with open(fn, 'rb') as f:
            all_files.append(f.read())
if __name__ == "__main__":
    # Each entry is a statement string passed to cProfile.run; commented-out
    # entries can be re-enabled to profile the other read strategies
    runs = ['datasets=test_full_read()',
            # 'test_partial()',
            # 'test_mem_read_full()',
            # 'test_mem_read_small()',
            'test_python_read_files()',
            ]
    random.shuffle(runs)
    for testrun in runs:
        # profile the statement, dumping stats to tempfile, then show top 5
        cProfile.run(testrun, tempfile)
        p = pstats.Stats(tempfile)
        print "---------------"
        print testrun
        print "---------------"
        p.strip_dirs().sort_stats('time').print_stats(5)
    # Spot-check that the full reads produced usable datasets
    print "Confirming file read worked -- check for data elements near end"
    try:
        image_sizes = [len(ds.PixelData) for ds in datasets]
    except Exception as e:
        print "Failed to access dataset data for all files\nError:" + str(e)
    else:
        print "Reads checked ok."
    # Clear disk cache for next run?
    import sys
    if not on_windows:
        prompt = "Run purge command (linux/Mac OS X) to clear disk cache?...(N):"
        answer = raw_input(prompt)
        if answer.lower() == "y":
            print "Running 'purge'. Please wait..."
            os.system("purge")
| Python |
# __init__.py
# Mark the folder as a python package
| Python |
# raw_convert_test.py
"""Try reading a large RTSTRUCT file, profiling how much time it takes"""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os.path
import os
import sys
# EDIT THIS SECTION --------------------------
# to point to local temp directory
tempfile = "/tmp/pydicom_stats"
read_filename = r"/Users/darcy/hg/pydicom/source/dicom/testfiles/RStest.dcm"
write_filename = "/tmp/write_test.dcm"
import dicom
from io import BytesIO
import cProfile
import pstats
import sys
def test_full_read(filename):
    """Read and fully parse the DICOM file at *filename*; return the dataset."""
    return dicom.read_file(filename)
def test_convert_from_raw(dataset):
s = str(dataset)
def test_write_file(dataset, write_filename):
    """Time writing *dataset* back out to *write_filename* on disk."""
    dataset.save_as(write_filename)
if __name__ == "__main__":
    # Profile the read, raw-conversion and write passes separately; each
    # entry is a statement string handed to cProfile.run
    runs = ['ds=test_full_read(read_filename)',
            'test_convert_from_raw(ds)',
            'test_write_file(ds, write_filename)',
            ]
    for testrun in runs:
        cProfile.run(testrun, tempfile)
        p = pstats.Stats(tempfile)
        print "---------------"
        print testrun
        print "---------------"
        p.strip_dirs().sort_stats('time').print_stats(8)
    # Clear disk cache for next run?
    # import sys
    # if not on_windows:
    #     prompt= "Run purge command (linux/Mac OS X) to clear disk cache?(N):"
    #     answer = raw_input(prompt)
    #     if answer.lower() == "y":
    #         print "Running 'purge'. Please wait..."
    #         os.system("purge")
| Python |
# _write_stds.py
"""Snippets for what a particular dataset (including nested sequences)
should look like after writing in different expl/impl Vr and endian combos,
as well as undefined length sequences and items
"""
# Implicit VR, little endian, SQ's with defined lengths
impl_LE_deflen_std_hex = (
"10 00 10 00 " # (0010, 0010) Patient's Name
"0c 00 00 00 " # length 12
"4e 61 6d 65 5e 50 61 74 69 65 6e 74 " # "Name^Patient"
"06 30 39 00 " # (3006, 0039) ROI Contour Sequence
"5a 00 00 00 " # length 90
"fe ff 00 e0 " # (fffe, e000) Item Tag
"52 00 00 00 " # length 82
"06 30 40 00 " # (3006, 0040) Contour Sequence
"4a 00 00 00 " # length 74
"fe ff 00 e0 " # (fffe, e000) Item Tag
"1a 00 00 00 " # length 26
"06 30 48 00 " # (3006, 0048) Contour Number
"02 00 00 00 " # length 2
"31 20 " # "1 "
"06 30 50 00 " # (3006, 0050) Contour Data
"08 00 00 00 " # length 8
"32 5c 34 5c 38 5c 31 36 " # "2\4\8\16"
"fe ff 00 e0 " # (fffe, e000) Item Tag
"20 00 00 00 " # length 32
"06 30 48 00 " # (3006, 0048) Contour Number
"02 00 00 00 " # length 2
"32 20 " # "2 "
"06 30 50 00 " # (3006, 0050) Contour Data
"0e 00 00 00 " # length 14
"33 32 5c 36 34 5c 31 32 38 5c 31 39 36 20 "
# "32\64\128\196 "
)
# Implicit VR, big endian, SQ's with defined lengths
# Realized after coding this that there is no Impl VR big endian in DICOM std;
# however, it seems to exist as a GE private transfer syntax.
# Will leave this here for now.
impl_BE_deflen_std_hex = (
    "00 10 00 10 "   # (0010, 0010) Patient's Name
    "00 00 00 0c "   # length 12
    "4e 61 6d 65 5e 50 61 74 69 65 6e 74 "  # "Name^Patient"
    "30 06 00 39 "   # (3006, 0039) ROI Contour Sequence
    "00 00 00 5a "   # length 90
    "ff fe e0 00 "   # (fffe, e000) Item Tag
    "00 00 00 52 "   # length 82
    "30 06 00 40 "   # (3006, 0040) Contour Sequence
    "00 00 00 4a "   # length 74
    "ff fe e0 00 "   # (fffe, e000) Item Tag
    "00 00 00 1a "   # length 26
    "30 06 00 48 "   # (3006, 0048) Contour Number
    "00 00 00 02 "   # length 2
    "31 20 "         # "1 "
    "30 06 00 50 "   # (3006, 0050) Contour Data
    "00 00 00 08 "   # length 8
    "32 5c 34 5c 38 5c 31 36 "  # "2\4\8\16"
    "ff fe e0 00 "   # (fffe, e000) Item Tag
    "20 00 00 00 "   # length 32 -- NOTE(review): bytes look little-endian;
                     # every other BE length here is byte-swapped, expected
                     # "00 00 00 20" -- confirm against the writer's output
    "30 06 00 48 "   # (3006, 0048) Contour Number
    "00 00 00 02 "   # length 2
    "32 20 "         # "2 "
    "30 06 00 50 "   # (3006, 0050) Contour Data
    "00 00 00 0e "   # length 14
    "33 32 5c 36 34 5c 31 32 38 5c 31 39 36 20 "
    # "32\64\128\196 "
)
| Python |
# run_tests.py
"""Call all the unit test files in the test directory starting with 'test'"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os
import os.path
import sys
import unittest
# Get the directory test_dir where the test scripts are
from pkg_resources import Requirement, resource_filename
test_dir = resource_filename(Requirement.parse("pydicom"), "dicom/test")
class MyTestLoader(object):
    """Loader that discovers dicom.test.test*.py modules found on disk."""

    def loadTestsFromNames(self, *args):
        """Build a suite from every test*.py module in test_dir.

        *args* is accepted for loader-interface compatibility and ignored.
        """
        # Simplest to change to directory where test_xxx.py files are
        previous_dir = os.getcwd()
        if test_dir:
            os.chdir(test_dir)
        candidates = os.listdir(".")
        module_names = [name[:-3] for name in candidates
                        if name.startswith("test") and name.endswith(".py")]
        # Load all the tests into a single suite
        suite = unittest.TestSuite()
        for module_name in module_names:
            dotted = "dicom.test." + module_name
            suite.addTest(unittest.defaultTestLoader.loadTestsFromName(dotted))
        os.chdir(previous_dir)
        return suite
if __name__ == "__main__":
    # Get the tests -- in format used by Distribute library
    # to run under 'python setup.py test'
    suite = MyTestLoader().loadTestsFromNames()
    # Run the tests; -v / --verbose bumps unittest verbosity
    verbosity = 1
    args = sys.argv
    if len(args) > 1 and (args[1] == "-v" or args[1] == "--verbose"):
        verbosity = 2
    runner = unittest.TextTestRunner(verbosity=verbosity)
    # Switch directories to test DICOM files, used by many of the tests
    save_dir = os.getcwd()
    testfiles_dir = resource_filename(Requirement.parse("pydicom"),
                                      "dicom/testfiles")
    os.chdir(testfiles_dir)
    runner.run(suite)
    os.chdir(save_dir)  # restore the original working directory
| Python |
# __init__.py
| Python |
# warncheck.py
#
import warnings
import unittest
from sys import version_info
from dicom.test.version_dep import capture_warnings
def assertWarns(self, warn_msg, function, *func_args, **func_kwargs):
    """
    Check that the function generates the expected warning
    with the arguments given.

    warn_msg -- part of the warning string; the single warning raised
                must contain this substring
    function -- the function to call (expected to issue one warning)
    func_args -- positional arguments to the function
    func_kwargs -- keyword arguments to the function

    Return the function return value.
    """
    result, all_warnings = capture_warnings(function, *func_args,
                                            **func_kwargs)
    msg = "Expected one warning; got {0:d}"
    # assertTrue replaces the deprecated TestCase.assert_ alias
    self.assertTrue(len(all_warnings) == 1, msg.format(len(all_warnings)))
    msg = "Expected warning message '{0:s}...'; got '{1:s}'"
    self.assertTrue(warn_msg in all_warnings[0],
                    msg.format(warn_msg, all_warnings[0]))
    return result
def test_warning(the_warning):
if the_warning:
warnings.warn(the_warning)
class WarnTests(unittest.TestCase):
    """Self-test for the assertWarns helper in this module."""
    def testWarn(self):
        """Test that assertWarns works as expected"""
        # "Look" must appear in the single warning raised by test_warning
        assertWarns(self, "Look", test_warning, "Look out")
if __name__ == "__main__":
    # run this module's self-tests when executed directly
    unittest.main()
| Python |
from sys import argv
from os.path import exists
script, from_file, to_file = argv
print "Copying from %s to %s" % (from_file, to_file)
indata = open(from_file).read()
print "The input file is %d bytes long." % len(indata)
print "Does the output file exist? %r" % exists(to_file)
out_file = open(to_file, 'w')
out_file.write(indata)
print "Alright, all DOne."
out_file.close() | Python |
def break_words(stuff):
    """Split *stuff* into a list of words on single spaces."""
    return stuff.split(' ')
def sort_words(words):
    """Return a new, alphabetically sorted copy of *words*."""
    ordered = sorted(words)
    return ordered
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
    """Takes in a full sentence and returns the sorted words."""
    return sort_words(break_words(sentence))
def print_first_and_last(sentence):
    """Prints the first and last words of the sentence."""
    remaining = break_words(sentence)
    print_first_word(remaining)
    print_last_word(remaining)
def print_first_and_last_sorted(sentence):
    """Sorts the words then prints the first and last one."""
    remaining = sort_sentence(sentence)
    print_first_word(remaining)
    print_last_word(remaining)
# Python 2 exercise script: escapes, a multi-line poem and simple math
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'

poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explantion
\n\t\twhere there is none.
"""

print "--------------"
print poem
print "--------------"

five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(started):
    """Return (jelly_beans, jars, crates) derived from the starting count."""
    bean_count = started * 500
    jar_count = bean_count / 1000
    crate_count = jar_count / 100
    return bean_count, jar_count, crate_count
start_point = 10000
beans, jars, crates = secret_formula(start_point)

print "With a starting point of: %d" % start_point
print "We'd have %d jeans, %d jars, and %d crates." % (beans, jars, crates)

start_point = start_point / 10
print "We can also do that this way:"
# the tuple returned by secret_formula feeds the format string directly
print "We'd have %d beans, %d jars, and %d crabapples." % secret_formula(start_point)

# trailing practice string; assigned but never printed
sentence = "All god\tthings come to those who weight."
| Python |
from sys import argv

# Python 2 exercise: read a file whole, rewind it, then read line by line.
# command line: the file to display
script, file_name = argv

def print_all(f):
    # dump the entire remaining contents of the file
    print f.read()

def rewind(f):
    # seek back to the start of the file
    f.seek(0)

def print_a_line(line_cnt, f):
    # show a line number followed by the next line of the file
    print line_cnt, f.readline()

current_file = open(file_name)

print "First let's print the whole file:\n"
print_all(current_file)

print "Now let's rewind, kind of like a tape."
rewind(current_file)

print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)

current_line +=1
print_a_line(current_line, current_file)

current_line +=1
print_a_line(current_line, current_file)
from sys import argv

# Python 2 exercise: truncate a file, then write three lines typed by the user.
# command line: the file to (re)create
script, filename = argv

print " Going to erase %r." % filename
print "If you don't want to continue, hit CTRL+C."
print "If you do want that, hit any keys + RETURN"
raw_input("ready?")

print "Opening file ....."
target = open(filename, 'w')   # note: 'w' mode already truncates on open

print "Truncating this file..."
target.truncate()

print "now I'm going to ask you for 3 lines."
line1 = raw_input("line 1:")
line2 = raw_input("line 2:")
line3 = raw_input("line 3:")

print "I'm going to write these to the file."
# each line is followed by a blank line
target.write(line1)
target.write("\n\n")
target.write(line2)
target.write("\n\n")
target.write(line3)
target.write("\n\n")

print "And Finally, we close it."
target.close()

# reopen read-only; the read result is discarded
target = open(filename)
target.read()
target.close()
| Python |
# Word-list helper functions (Python 2: uses the print statement).
# The print_* helpers mutate the list they are given by popping items.

def break_words(stuff):
    """This function will break up words for us."""
    words = stuff.split(' ')
    return words

def sort_words(words):
    """Sorts the words."""
    return sorted(words)

def print_first_word(words):
    """Prints the first word after popping it off."""
    word = words.pop(0)
    print word

def print_last_word(words):
    """Prints the last word after popping it off."""
    word = words.pop(-1)
    print word

def sort_sentence(sentence):
    """Takes in a full sentence and returns the sorted words."""
    words = break_words(sentence)
    return sort_words(words)

def print_first_and_last(sentence):
    """Prints the first and last words of the sentence."""
    words = break_words(sentence)
    print_first_word(words)
    print_last_word(words)

def print_first_and_last_sorted(sentence):
    """Sorts the words then prints the first and last one."""
    words = sort_sentence(sentence)
    print_first_word(words)
    print_last_word(words)
from sys import argv

# Python 2 exercise: prompt the user for answers, then echo them back.
# command line: the user's name
script, user_name = argv
prompt = '>> '

print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask a few Questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)

print "Where do you live %s?" % user_name
lives = raw_input(prompt)

print "What kind of computer do you have?"
computer = raw_input(prompt)

# %r shows each answer quoted, exactly as it was typed
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice job!
""" % (likes, lives, computer)
| Python |
from sys import argv

# Python 2 exercise: print a file named on the command line, then one
# whose name is typed interactively.
script, filename = argv

txt = open(filename)
print "Here's your file %r:" % filename
print txt.read()

print "type the filename again:"
file_again = raw_input("> ")
txt_again = open(file_again)
#txt_again.write("--- added by write method ---")
print txt_again.read()

# close both handles when done
txt.close()
txt_again.close()
| Python |
# Python 2 exercise script: escapes, formatting and a small helper function
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'

poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""

print "--------------"
print poem
print "--------------"

five = 10 -2 + 3 - 6
print "This is five -> %s" % five

def secret_formula(hwg):
    # derive bean/jar/crate counts from the starting amount
    jelly_beans = hwg * 500
    jars = jelly_beans / 1000   # integer division under Python 2
    crates = jars / 100
    return jelly_beans, jars, crates

start_point = 1000
beans, jars, crates = secret_formula(start_point)

print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)

start_point = start_point / 100
print "We can also do that this way:"
# the returned tuple feeds the format string directly
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
# the two build flavours this script can switch the source tree between
configurations = {'lite', 'pro'}

# java package source directory (tuple, one entry per package) per flavour
package_dirs = {
    'lite': ('src/cx/hell/android/pdfview',),
    'pro': ('src/cx/hell/android/pdfviewpro',)
}

# per-flavour strings; entries at the same tuple index are swapped for
# each other when switching configuration
file_replaces = {
    'lite': (
        'cx.hell.android.pdfview.',
        '"cx.hell.android.pdfview"',
        'package cx.hell.android.pdfview;',
        'android:icon="@drawable/pdfviewer"',
    ),
    'pro': (
        'cx.hell.android.pdfviewpro.',
        '"cx.hell.android.pdfviewpro"',
        'package cx.hell.android.pdfviewpro;',
        'android:icon="@drawable/apvpro_icon"',
    ),
}
def make_comment(file_type, line):
    """Comment out *line* with the syntax for *file_type*, leaving lines
    that are already commented out unchanged."""
    if file_type in ('java', 'c'):
        if line.startswith('//'):
            return line
        return '// ' + line
    if file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!--'):
            return line
        return '<!-- ' + stripped + ' -->\n'
    raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
    """Strip one level of comment markers from *line* (the inverse of
    make_comment), returning the line unchanged if it is not commented."""
    if file_type in ('java', 'c'):
        return line[3:] if line.startswith('// ') else line
    if file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!-- ') and stripped.endswith(' -->'):
            return stripped[5:-4] + '\n'
        return line
    raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
    """Toggle conditional sections marked by '#ifdef NAME' / '#endif' comments.

    Lines inside a single #ifdef block matching *conf* are uncommented;
    lines inside any other (or nested) block are commented out.
    Returns a new list of lines, same length as the input.
    """
    start_pat = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
    end_pat = re.compile(r'(?:(//|<!--))\s+#endif')
    active_defs = []  # stack of #ifdef names currently open
    result = []
    for lineno, line in enumerate(lines):
        start_match = start_pat.search(line)
        if start_match:
            name = start_match.group('def')
            logging.debug("line %s:%d %s matches as start of %s" % (filename, lineno+1, line.strip(), name))
            active_defs.append(name)
            result.append(line)
            continue
        if end_pat.search(line):
            logging.debug("line %s:%d %s matches as endif" % (filename, lineno+1, line.strip()))
            active_defs.pop()
            result.append(line)
            continue
        if not active_defs:
            result.append(line)
        elif len(active_defs) == 1 and active_defs[0] == conf:
            result.append(remove_comment(file_type, line))
        else:
            result.append(make_comment(file_type, line))
    assert len(result) == len(lines)
    return result
def find_files(dirname, name):
    """Recursively collect paths under *dirname* whose basename matches
    the glob pattern *name*."""
    found = []
    for root, _dirs, files in os.walk(dirname):
        found.extend(os.path.join(root, f) for f in fnmatch.filter(files, name))
    return found
def fix_package_dirs(conf):
    """Ensure every package source dir for *conf* exists, renaming the other
    configuration's directory into place when needed."""
    for i, dirname in enumerate(package_dirs[conf]):
        logging.debug("trying to restore %s" % dirname)
        if os.path.exists(dirname):
            if os.path.isdir(dirname):
                logging.debug(" already exists")
                continue
            else:
                # a non-directory is in the way; report and move on
                logging.error(" %s already exists, but is not dir" % dirname)
                continue
        # find other name: the same package index under another configuration
        found_dirname = None
        for other_conf, other_dirnames in package_dirs.items():
            other_dirname = other_dirnames[i]
            if other_conf == conf: continue  # skip this conf when looking for other conf
            if os.path.isdir(other_dirname):
                if found_dirname is None:
                    found_dirname = other_dirname
                else:
                    # source dir already found :/
                    raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
        if found_dirname is None:
            raise Exception("didn't find %s" % dirname)
        # now rename found_dirname to dirname
        os.rename(found_dirname, dirname)
        logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
    """Apply handle_comments() to each file, rewriting only changed files."""
    for filename in filenames:
        # close the read handle deterministically; the original leaked it
        with open(filename) as f:
            lines = f.readlines()
        new_lines = handle_comments(conf, file_type, lines, filename)
        if lines != new_lines:
            logging.debug("file %s comments changed" % filename)
            with open(filename, 'w') as f:
                f.write(''.join(new_lines))
def replace_in_files(conf, filenames):
    """Swap every other configuration's strings for *conf*'s strings in
    each of *filenames*, rewriting only the files that actually change."""
    other_confs = [other_conf for other_conf in file_replaces.keys() if other_conf != conf]
    for filename in filenames:
        # close the read handle deterministically; the original leaked it
        with open(filename) as f:
            lines = f.readlines()
        new_lines = []
        for line in lines:
            new_line = line
            # replacements are index-aligned between configurations
            for i, target_string in enumerate(file_replaces[conf]):
                for other_conf in other_confs:
                    source_string = file_replaces[other_conf][i]
                    new_line = new_line.replace(source_string, target_string)
            new_lines.append(new_line)
        if new_lines != lines:
            logging.debug("file %s changed, writing..." % filename)
            with open(filename, 'w') as f:
                f.write(''.join(new_lines))
        else:
            logging.debug("file %s didn't change, no need to rewrite" % filename)
def fix_java_files(conf):
    """Rewrite package strings and conditional comments in Java sources."""
    java_files = find_files('src', name='*.java')
    replace_in_files(conf, java_files)
    handle_comments_in_files(conf, 'java', java_files)
def fix_xml_files(conf):
    """Rewrite package strings and conditional comments in all XML files."""
    xml_files = find_files('.', name='*.xml')
    replace_in_files(conf, xml_files)
    handle_comments_in_files(conf, 'xml', xml_files)
def fix_html_files(conf):
    """Rewrite package strings and conditional comments in bundled HTML."""
    html_files = find_files('res', name='*.html')
    replace_in_files(conf, html_files)
    handle_comments_in_files(conf, 'html', html_files)
def fix_c_files(conf):
    """Rewrite package strings and conditional comments in the JNI C sources
    (first the .c files, then the .h files, as before)."""
    for pattern in ('*.c', '*.h'):
        c_files = find_files('jni/pdfview2', name=pattern)
        replace_in_files(conf, c_files)
        handle_comments_in_files(conf, 'c', c_files)
def fix_resources(conf):
    """Placeholder: resources currently need no per-configuration fixes."""
    return None
def main():
    """Entry point: parse --configuration and rewrite the project tree."""
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    parser = argparse.ArgumentParser(description='Switch project configurations')
    parser.add_argument('--configuration', dest='configuration', default='lite')
    options = parser.parse_args()
    # sanity check: must run from the project root
    if not os.path.exists('AndroidManifest.xml'):
        raise Exception('android manifest not found, please run this script from main project directory')
    conf = options.configuration
    if conf not in configurations:
        raise Exception("invalid configuration: %s" % conf)
    # apply every fixer in the original order
    for fixer in (fix_package_dirs, fix_java_files, fix_xml_files,
                  fix_html_files, fix_c_files, fix_resources):
        fixer(conf)
if __name__ == '__main__':
    # command-line entry point
    main()
| Python |
# initialize PETSC & SLEPC
import sys, petsc4py,slepc4py
slepc4py.init(sys.argv)
# load freefem tools
import freefem_bodyforce as ff_body
# load libraries
from numpy import *
import matplotlib.pyplot as plt
import h5py as h5
from petsc4py import PETSc
from slepc4py import SLEPc
from mpi4py import MPI
# load functions for stability analysis
import parfemstab as pfs
from parfemstab import Print,PrintRed,PrintGreen
# Set MUMPS as the linear solver (direct LU) for the spectral transform
opts = PETSc.Options()
opts.setValue('st_ksp_type','preonly')
opts.setValue('st_pc_type','lu')
opts.setValue('st_pc_factor_mat_solver_package','mumps')

# Parallel info & print
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Get the directory where ff++ data is
di = opts.getString('dir')
PrintRed('Running tests in '+ di + '\n')
Print("Testing the interface for B q' = L q + B_2 f... \n")

# Build FreeFEMdisc from .dat files (rank 0 loads; other ranks get empty stubs)
Print('Loading discretization files ... ')
if rank == 0:
    try:
        ffdisc = ff_body.FreeFEMdisc_bodyforce(di+'/ffdata.h5')
        Print('data loaded from .h5 file ... ')
    except IOError:
        # fall back to raw FreeFEM output, then cache it as HDF5 for next time
        ffdisc = ff_body.FreeFEMdisc_bodyforce(di+'/lin/')
        ffdisc.SaveHDF5(di+'/ffdata.h5')
        Print('data loaded from .dat file ... ')
    # Get the projection operator on velocity DOFs
    Pu = ffdisc.getPu(iu=[0,1])
    Qr = Pu.transpose()*ffdisc.Q*Pu
    h5f = h5.File(di+"/results.h5","w")   # results written by rank 0 only
else:
    ffdisc = ff_body.EmptyFreeFEMdisc()
    Pu = None
    Qr = None
PrintGreen('done \n')

# Create PETSC matrices
Print('Convert matrices to PETSC parallel format ... ')
Lmat = pfs.CSR2Mat(ffdisc.L)
Bmat = pfs.CSR2Mat(ffdisc.B)
B2mat = pfs.CSR2Mat(ffdisc.B2)
Pumat = pfs.CSR2Mat(Pu)
Qmat = pfs.CSR2Mat(ffdisc.Q)
Qrmat = pfs.CSR2Mat(Qr)
PrintGreen('done \n')

# Clear some space in memory (the scipy copies are no longer needed)
del ffdisc.L,ffdisc.B,ffdisc.B2,ffdisc.Q,Qr

# Compute optimal forcings, sweeping over forcing frequencies omega
Print('Compute optimal forcings using SLEPC ... ')
omegas = linspace(0.05,2,10)
G = zeros(len(omegas)); idx = 0
for iomega in range(len(omegas)):
    omega = omegas[iomega]
    Print(' omega = %f'%omega)
    # Set up the shell matrix and compute the factorizations
    t1 = MPI.Wtime()
    shell = pfs.OptimalForcings(Lmat,Bmat,B2mat,Pumat,Qmat,Qrmat,omega)
    localsizes,globalsizes = Qrmat.getSizes()
    FR = PETSc.Mat().create(comm)
    FR.setSizes(globalsizes)
    FR.setType('python')
    FR.setPythonContext(shell)
    FR.setUp()
    t2 = MPI.Wtime()
    Print(' CPU time to build FR object : %10.4g '%(t2-t1))
    # Compute optimal perturbations (leading gain + forcing/response pair)
    gains,fs,qs = pfs.OptimalForcingsSLEPc(FR,shell,1)
    if rank == 0:
        # store the gain and the leading forcing/flow pair for this frequency
        G[idx] = gains[0].real; idx +=1
        grp = h5f.create_group("freq_%05d"%idx)
        dset = grp.create_dataset('forcing',data=fs[:,0])
        dset = grp.create_dataset('flow' ,data=qs[:,0])
        grp.attrs['omega'] = omega
        grp.attrs['gain'] = G[idx-1]
        h5f.flush()   # keep partial results on disk in case of a crash
    t1 = MPI.Wtime()
    Print(' CPU time to solve the EVP : %10.4g '%(t1-t2))

if rank == 0:
    h5f.close()
    # plot computed gains against reference data points (digitized curve)
    plt.figure()
    plt.plot(omegas,G)
    data = array([[0.04444444444444448,179.48133558103873],
                  [0.05714285714285714,205.06762710686837],
                  [0.13650793650793647,566.5202447230035],
                  [0.14603174603174598,629.5574531349081],
                  [0.22539682539682535,1729.5858443874629],
                  [0.23809523809523814,1998.2190123622177],
                  [0.4554112554112554,7492.005210421082],
                  [0.4874779541446206,7501.255814641612],
                  [0.6095238095238096,5613.014207297659],
                  [0.6857142857142857,4022.5514510637777],
                  [0.8888888888888888,1385.0935270147158],
                  [0.9904761904761905,803.793878772087],
                  [1.0730158730158732,532.9520080701366],
                  [1.1746031746031746,319.75927541335875],
                  [1.276190476190476,205.06762710686837],
                  [1.358730158730159,146.96116035346333],
                  [1.5555555555555556,75.47680448057177],
                  [1.6444444444444444,57.81730945850528],
                  [1.9301587301587306,29.36605846971029]])
    plt.plot(data[:,0],data[:,1],'o')
    plt.show()
PrintGreen('done \n')
| Python |
# initialize PETSC & SLEPC
"""Regression test for the parfemstab.TimeStepping shell operator:
advances a random state forward (mult) and adjointly (multTranspose)
and checks the adjoint identity <y, A x> = <A^H y, x>."""
import sys, petsc4py,slepc4py
slepc4py.init(sys.argv)
# load freefem tools
import freefem as ff
# load libraries
from numpy import *
import matplotlib.pyplot as plt
import h5py as h5
from petsc4py import PETSc
from slepc4py import SLEPc
from mpi4py import MPI
# load functions for stability analysis
import parfemstab as pfs
from parfemstab import Print,PrintRed,PrintGreen
# Set MUMPS as the linear solver
opts = PETSc.Options()
opts.setValue('ksp_type','preonly')
opts.setValue('pc_type','lu')
opts.setValue('pc_factor_mat_solver_package','mumps')
# Parallel info
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Get the directory where ff++ data is (from the -dir command-line option)
di = opts.getString('dir')
PrintRed('Running tests in '+ di + '\n')
PrintRed("Testing time stepping routines ... \n")
# Build FreeFEMdisc from .dat files
Print('Loading discretization files ... ')
# Only rank 0 holds the sequential discretization; it is converted to
# distributed PETSc objects below. Prefer the cached .h5 file, fall back
# to parsing the raw FreeFem++ .dat files (and cache them).
if rank == 0:
    try:
        ffdisc = ff.FreeFEMdisc(di+'/ffdata.h5')
        Print('data loaded from .h5 file ... ')
    except IOError:
        ffdisc = ff.FreeFEMdisc(di+'/lin/')
        ffdisc.SaveHDF5(di+'/ffdata.h5')
        Print('data loaded from .dat file ... ')
    # Get the projection operator on velocity DOFs
    Pu = ffdisc.getPu(iu=[0,1])
else:
    ffdisc = ff.EmptyFreeFEMdisc()
    Pu = None
PrintGreen('done \n')
# Create PETSC matrices
Print('Convert matrices to PETSC parallel format ... ')
Lmat = pfs.CSR2Mat(ffdisc.L)
Bmat = pfs.CSR2Mat(ffdisc.B)
Pumat = pfs.CSR2Mat(Pu)
PrintGreen('done \n')
# Clear some space in memory
del ffdisc.L,ffdisc.B
# Compute modes using SLEPc
Print('Perform time stepping ...  \n')
# Set the time step and scheme (dt = cfl * smallest mesh edge)
cfl = opts.getReal('cfl',10)
cn = opts.getBool('cn',False)
if rank == 0:
    hmin = ffdisc.GetHmin()
else:
    hmin = 1.
hmin = comm.bcast(hmin,root=0)
dt = hmin * cfl
Print('Time stepping parameters ')
if cn:
    Print('scheme  : CN')
else:
    Print('scheme  : Euler')
Print('CFL     : %g'%cfl)
Print('dt      : %g'%dt)
x = Lmat.getVecRight()
Lx = Lmat.getVecRight()
y = Lmat.getVecRight()
LHy = Lmat.getVecRight()
lsize,gsize = x.getSizes()
localsizes,globalsizes = Lmat.getSizes()
t1 = MPI.Wtime()
# Time stepper
TS = pfs.TimeStepping(Lmat,Bmat,Pumat,dt)
t2 = MPI.Wtime()
Print('   CPU time to build TS object  : %10.4g '%(t2-t1))
# random initial condition (normalized; x for the direct run, y for the adjoint)
v = random.rand(lsize) - .5
x.setArray(v)
norm = x.norm(); x.scale(1./norm)
v = random.rand(lsize) - .5
y.setArray(v)
norm = y.norm(); y.scale(1./norm)
Tf = 0.5
TS.setTf(Tf)
t1 = MPI.Wtime()
TS.mult(None,x,Lx)
t2 = MPI.Wtime()
Print('   CPU time to advance in time of %10.4g : %10.4g '%(Tf,t2-t1))
t1 = MPI.Wtime()
TS.multTranspose(None,y,LHy)
t2 = MPI.Wtime()
Print('   CPU time to advance in time of %10.4g : %10.4g '%(Tf,t2-t1))
# Adjoint consistency check: y.(A x) must equal (A^H y).x
dot1 = y.dot(Lx)
dot2 = LHy.dot(x)
Print('Error on the adjoint: %g'%abs(dot2 - dot1.conjugate()))
# Gather initial and advanced states on rank 0 and plot the first field
xv  = pfs.Vec2DOF(x)
Lxv = pfs.Vec2DOF(Lx)
if rank == 0:
    plt.figure()
    plt.subplot(211)
    ffdisc.PlotVar(xv,0)
    plt.subplot(212)
    ffdisc.PlotVar(Lxv,0)
    plt.show()
| Python |
from numpy import *
import matplotlib.pyplot as plt
import scipy.sparse as sp
from numpy.linalg import norm
from petsc4py import PETSc
from slepc4py import SLEPc
from mpi4py import MPI
from petsc4py import PETSc
# Collective print helper (PETSc prints once instead of once per rank)
Print = PETSc.Sys.Print
# Print in color
# ANSI terminal escape sequences; CLRFORMAT_ resets the color
REDBOLD_  = "\033[1;31m"
RED_      = "\033[31m"
GREEN_    = "\033[32m"
CYAN_     = "\033[36m"
YELLOW_   = "\033[33m"
CLRFORMAT_ = "\033[0m"
def PrintRed(s):
    """Collectively print 's' wrapped in red ANSI color codes."""
    Print(RED_,s,CLRFORMAT_)
def PrintGreen(s):
    """Collectively print 's' wrapped in green ANSI color codes."""
    Print(GREEN_,s,CLRFORMAT_)
# Default solver tolerances (tol_ev is used by DirectModeSLEPc below;
# tol_fr is not referenced in this excerpt)
tol_ev=1e-12
tol_fr=1e-12
def CSR2Mat(L):
    """
    Converts a sequential scipy sparse matrix (on process 0) to a PETSc
    Mat ('aij') matrix distributed on all processes
    input : L, scipy sparse matrix on proc 0
    output: PETSc matrix distributed on all procs
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    # Get the data from the sequential scipy matrix (CSR triplet arrays)
    if rank == 0:
        if L.format == 'csr':
            L2 = L
        else:
            L2 = L.tocsr()
        Ai  = L2.indptr
        Aj  = L2.indices
        Av  = L2.data
        nnz = len(Aj)
        n,m = L2.shape
    else:
        n   = None
        m   = None
        nnz = None
        Ai  = None
        Aj  = None
        Av  = None
    # Broadcast sizes
    n   = comm.bcast(n  ,root = 0)
    m   = comm.bcast(m  ,root = 0)
    nnz = comm.bcast(nnz,root = 0)
    B = PETSc.Mat()
    B.create(comm)
    B.setSizes([n, m])
    B.setType('aij')
    B.setFromOptions()
    # Create a vector to get the local sizes, so that preallocation can be done later
    # (a throwaway Vec yields PETSc's default row-ownership ranges)
    V = PETSc.Vec()
    V.create(comm)
    V.setSizes(n)
    V.setFromOptions()
    istart,iend = V.getOwnershipRange()
    V.destroy()
    nloc = iend - istart
    Istart = comm.gather(istart,root = 0)
    Iend   = comm.gather(iend  ,root = 0)
    # rank 0 computes how many nonzeros fall in each process's row block
    if rank == 0:
        nnzloc = zeros(comm.size,'int')
        for i in range(comm.size):
            j0 = Ai[Istart[i]]
            j1 = Ai[Iend  [i]]
            nnzloc[i] = j1 - j0
    else:
        nnzloc = None
    nnzloc = comm.scatter(nnzloc,root = 0)
    # local CSR arrays; aj/av carry one extra entry (see note on ai[-1] below)
    ai = zeros(nloc+1  ,PETSc.IntType)
    aj = zeros(nnzloc+1,PETSc.IntType)
    av = zeros(nnzloc+1,PETSc.ScalarType)
    # rank 0 keeps its own slice of the CSR arrays ...
    if rank == 0:
        j0 = Ai[Istart[0]]
        j1 = Ai[Iend  [0]]
        ai[:nloc  ] = Ai[:nloc]
        aj[:nnzloc] = Aj[j0:j1]
        av[:nnzloc] = Av[j0:j1]
    # ... and sends every other rank its row-pointer/column/value slices
    # (tags 77/78/79 pair each Send with the matching Recv below)
    for iproc in range(1,comm.size):
        if rank == 0:
            i0 = Istart[iproc]
            i1 = Iend  [iproc]
            j0 = Ai[i0]
            j1 = Ai[i1]
            comm.Send(Ai[i0:i1], dest=iproc, tag=77)
            comm.Send(Aj[j0:j1], dest=iproc, tag=78)
            comm.Send(Av[j0:j1], dest=iproc, tag=79)
        elif rank == iproc:
            comm.Recv(ai[:nloc  ], source=0, tag=77)
            comm.Recv(aj[:nnzloc], source=0, tag=78)
            comm.Recv(av[:nnzloc], source=0, tag=79)
    # rebase local row pointers so the first local row starts at 0
    ai = ai- ai[0]
    # NOTE(review): ai[-1] is set to nnzloc+1 (not nnzloc), which makes the
    # trailing padding entry aj[-1]=0, av[-1]=0 part of the last local row,
    # i.e. an explicit zero at column 0 -- numerically harmless but verify
    # this is intentional
    ai[-1] = nnzloc+1
    B.setPreallocationCSR((ai,aj))
    B.setValuesCSR(ai,aj,av)
    B.assemble()
    return B
def DOF2Vec(v):
    """
    Converts a sequential vector of all degrees of freedom on process 0
    to a distributed PETSc Vec
    input : v, numpy array on proc 0
    output: PETSc Vec distributed on all procs
    """
    from petsc4py import PETSc
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    # NOTE(review): len(v) is evaluated on every rank, so 'v' apparently must
    # be a sized object of the right length everywhere, even though only
    # rank 0's data is distributed -- confirm against callers
    n = len(v)
    x = PETSc.Vec()
    x.create(comm)
    x.setSizes(n)
    x.setFromOptions()
    istart,iend = x.getOwnershipRange()
    nloc = iend - istart
    Istart = comm.gather(istart,root = 0)
    Iend   = comm.gather(iend  ,root = 0)
    vloc = zeros(nloc,PETSc.ScalarType)
    # rank 0 keeps its own slice and ships the others point-to-point
    if rank == 0:
        vloc[:nloc  ] = v[:nloc]
    for iproc in range(1,comm.size):
        if rank == 0:
            i0 = Istart[iproc]
            i1 = Iend  [iproc]
            comm.Send(v[i0:i1], dest=iproc, tag=77)
        elif rank == iproc:
            comm.Recv(vloc, source=0, tag=77)
    x.setArray(vloc)
    return x
def Vec2DOF(x):
    """
    Converts a a distributed PETSc Vec to a sequential vector of all
    degrees of freedom on process 0
    input : x, PETSc Vec distributed on all procs
    output: numpy array on proc 0 (None on other procs)
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    vloc = x.getArray()
    n = x.getSize()
    istart,iend = x.getOwnershipRange()
    nloc = iend - istart
    Istart = comm.gather(istart,root = 0)
    Iend   = comm.gather(iend  ,root = 0)
    # only the root allocates the full-size result
    if rank == 0:
        v = zeros(n,PETSc.ScalarType)
    else:
        v = None
    # inverse pattern of DOF2Vec: every rank sends its local slice to root
    if rank == 0:
        v[:nloc  ] = vloc
    for iproc in range(1,comm.size):
        if rank == 0:
            i0 = Istart[iproc]
            i1 = Iend  [iproc]
            comm.Recv(v[i0:i1], source=iproc, tag=77)
        elif rank == iproc:
            comm.Send(vloc, dest=0, tag=77)
    return v
def DirectModeSLEPc(L,B,shift,nev):
    """
    Computes generalized eigenvectors and eigenvalues for the problem
    Lq = lambda Bq
    using SLEPc
    inputs : B,L,  PETSc Mats
             shift, scalar (same on all procs). Shift parameter for
                    the shift-invert method
             nev,   integer. Number of requested eigenvalues
    outputs: omega,    complex array(nconv). Computed eigenvalues
             modes,    complex array(nconv,ndofs). Computed eigenvectors
             residual, real array(nconv). Actual residuals for each mode
             ALL OUTPUT ARE ONLY ON PROC 0 (=None on other procs)
    TO DO: compute left eigenvectors (adjoint problem)
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    ev   = L.getVecRight()
    Lq   = ev.duplicate()
    Bq   = ev.duplicate()
    ndof = ev.getSize()
    # Setup EPS
    Print("  - Setting up the EPS and the ST")
    # shift-invert spectral transform: targets eigenvalues closest to 'shift'
    SI = SLEPc.ST().create()
    SI.setType(SLEPc.ST.Type.SINVERT)
    SI.setOperators(L,B)
    SI.setShift(shift)
    SI.setFromOptions()
    S = SLEPc.EPS();
    S.create(comm)
    S.setTarget(shift)
    S.setWhichEigenpairs(SLEPc.EPS.Which.TARGET_MAGNITUDE)
    S.setST(SI)
    S.setDimensions(nev = nev,ncv = 60)
    S.setTolerances(tol=tol_ev, max_it=100)
    S.setFromOptions()
    # Solve the EVP
    Print("  - Solving the EPS")
    S.solve()
    its   = S.getIterationNumber()
    nconv = S.getConverged()
    if rank == 0:
        residual=zeros(nconv)
        omega=zeros(nconv,'complex')
        modes=zeros([ndof,nconv],'complex')
    else:
        residual = None
        omega    = None
        modes    = None
    for i in range(nconv):
        eigval = S.getEigenpair(i, ev)
        # explicit residual || L q - lambda B q || / || q ||
        L.mult(ev,Lq)
        B.mult(ev,Bq)
        Bq.aypx(-eigval,Lq)
        res = Bq.norm()/ev.norm()
        v = Vec2DOF(ev)
        if rank == 0:
            # convert eigenvalue to frequency using lambda = -i*omega
            omega[i]  = eigval/(-1j)
            modes[:,i]= v
            residual[i] = res
    return omega,modes,residual
class OptimalPerturbations(object):
    """
    Shell matrix for optimal perturbations computations with PETSc.
    Its action composes: expansion of a reduced (velocity-only) vector,
    nt implicit time steps, the energy norm Q, the adjoint of the time
    stepper, restriction, and Qr^-1 (see mult).
    """
    def __init__(self,L,B,Pu,Q,Qr,dt,CN=True):
        """
        Parameters of the optimal perturbations computation:
        L,  PETSC Mat, discretization matrix
        B,  PETSC Mat, mass matrix
        Q,  PETSC Mat, norm matrix
        Pu, PETSC Mat, converting velocity only vectors to full DOFs vector
        Qr, PETSC Mat, norm matrix on the velocity-only space
            (should be invertible)
            Qr = Pu^T Q Pu
        dt, real, time step
        CN, bool, indicating whether Crank-Nicholson or backwards Euler is used
        The object contains two KSP solvers, one for implicit time
        stepping and one for the inversion of the norm matrix, that are initialized
        when the object is initialized
        """
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        self.L  = L
        self.B  = B
        self.Pu = Pu
        self.Q  = Q
        self.Qr = Qr
        if CN:
            Print('  - temporal discretization: Crank-Nicholson')
        else:
            Print('  - temporal discretization: Backwards Euler')
        Print('  - Setting the linear solvers')
        # Implicit operator: OP = B - dt*L (Euler) or B - dt/2*L (CN)
        self.OP = L.duplicate(copy=True)
        if CN:
            self.OP.scale(-dt/2.)
        else:
            self.OP.scale(-dt)
        self.OP.axpy(1.,B)
        # Linear solver for time stepping
        self.ksp = PETSc.KSP()
        self.ksp.create(comm)
        self.ksp.setType('preonly')
        self.ksp.getPC().setType('lu')
        self.ksp.setOperators(self.OP)
        self.ksp.setFromOptions()
        self.ksp.getPC().setUp()
#        self.ksp.view()
        # Second solver: inversion of the reduced norm matrix Qr
        self.ksp2 = PETSc.KSP()
        self.ksp2.create(comm)
        self.ksp2.setType('preonly')
        self.ksp2.getPC().setType('lu')
#        self.ksp2.getPC().setType('cholesky')
        self.ksp2.setOperators(self.Qr)
        self.ksp2.setFromOptions()
        self.ksp2.getPC().setUp()
#        self.ksp2.view()
        self.dt = dt
        self.nt = 1
        self.CN = CN
        # work vectors (tmp3 only needed for the explicit CN half-step)
        self.tmp  = L.getVecRight()
        self.tmp2 = L.getVecRight()
        self.tmpr = Qr.getVecRight()
        if self.CN:
            self.tmp3 = L.getVecRight()
    def setTf(self,Tf):
        """Set the horizon: nt = int(Tf/dt) steps (effective Tf is nt*dt)."""
        Tf = float(Tf)
        comm = MPI.COMm_WORLD
        rank = comm.Get_rank()
        self.nt = int(Tf/self.dt)
        Print('  - nt = %d'%self.nt)
        Print('  - Tf = %f'%(self.dt*self.nt))
    def mult(self,A,x,y):
        """Shell action: y = Qr^-1 Pu^T Phi^H Q Phi Pu x, where Phi is
        nt steps of the implicit (CN or Euler) propagator."""
        # forward propagation of the expanded initial condition
        self.Pu.mult(x,self.tmp2)
        for it in range(self.nt):
            self.B.mult(self.tmp2,self.tmp)
            if self.CN:# and it>0:
                self.L.mult(self.tmp2,self.tmp3)
                # Mass conservation is imposed at time n+1
                # (project the explicit half-step onto the velocity space)
                self.Pu.multTranspose(self.tmp3,self.tmpr)
                self.Pu.mult(self.tmpr,self.tmp2)
                self.tmp.axpy(self.dt/2.,self.tmp2)
            self.ksp.solve(self.tmp,self.tmp2)
        # apply the energy norm at the final time
        self.Q.mult(self.tmp2,self.tmp)
        # backward (adjoint) propagation: conjugation around solveTranspose
        # turns the transposed solve into a Hermitian-transpose solve
        for it in range(self.nt):
            self.tmp.conjugate()
            self.ksp.solveTranspose(self.tmp,self.tmp2)
            self.tmp2.conjugate()
            self.B.multTranspose(self.tmp2,self.tmp)
            if self.CN:# and it<(self.nt-1):
                self.Pu.multTranspose(self.tmp2,self.tmpr)
                self.Pu.mult(self.tmpr,self.tmp3)
                self.tmp3.conjugate()
                self.L.multTranspose(self.tmp3,self.tmp2)
                self.tmp2.conjugate()
                self.tmp.axpy(self.dt/2.,self.tmp2)
        # restrict to the velocity space and apply Qr^-1
        self.Pu.multTranspose(self.tmp,self.tmpr)
        self.ksp2.solve(self.tmpr,y)
    def PropagateIC(self,x):
        """Propagate the reduced initial condition x forward over nt steps
        and return the full-DOF state (no adjoint pass, no norm)."""
        self.Pu.mult(x,self.tmp2)
        for it in range(self.nt):
            self.B.mult(self.tmp2,self.tmp)
            if self.CN:# and it>0:
                self.L.mult(self.tmp2,self.tmp3)
                # Mass conservation is imposed at time n+1
                self.Pu.multTranspose(self.tmp3,self.tmpr)
                self.Pu.mult(self.tmpr,self.tmp2)
                self.tmp.axpy(self.dt/2.,self.tmp2)
            self.ksp.solve(self.tmp,self.tmp2)
        return self.tmp2
class TimeStepping(object):
    """
    Shell matrix for time stepping with PETSc: mult advances a state nt
    implicit steps forward, multTranspose applies the adjoint propagator.
    """
    def __init__(self,L,B,Pu,dt,CN=True):
        """
        Parameters of the time stepping:
        L,  PETSC Mat, discretization matrix
        B,  PETSC Mat, mass matrix
        Pu, PETSC Mat, converting velocity only vectors to full DOFs vector.
            Can be None for Euler
        dt, real, time step
        CN, bool, indicating whether Crank-Nicholson or backwards Euler is used
        The object contains one KSP solver for implicit time
        stepping that is initialized when the object is initialized
        """
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        self.L  = L
        self.B  = B
        self.Pu = Pu
        if CN:
            Print('  - temporal discretization: Crank-Nicholson')
        else:
            Print('  - temporal discretization: Backwards Euler')
        Print('  - Setting the linear solver')
        # Implicit operator: OP = B - dt*L (Euler) or B - dt/2*L (CN)
        self.OP = L.duplicate(copy=True)
        if CN:
            self.OP.scale(-dt/2.)
        else:
            self.OP.scale(-dt)
        self.OP.axpy(1.,B)
        # Linear solver
        self.ksp = PETSc.KSP()
        self.ksp.create(comm)
        self.ksp.setType('preonly')
        self.ksp.getPC().setType('lu')
        self.ksp.setOperators(self.OP)
        self.ksp.setFromOptions()
        self.ksp.getPC().setUp()
        self.dt = dt
        self.nt = 1
        self.CN = CN
        # work vectors; tmp2/tmpr only needed for the CN half-step
        self.tmp  = L.getVecRight()
        if self.CN:
            self.tmp2 = L.getVecRight()
            self.tmpr = Pu.getVecRight()
    def setTf(self,Tf):
        """Set the horizon: nt = int(Tf/dt) steps (effective Tf is nt*dt)."""
        Tf = float(Tf)
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        self.nt = int(Tf/self.dt)
        Print('  - nt = %d'%self.nt)
        Print('  - Tf = %f'%(self.dt*self.nt))
    def mult(self,A,x,y):
        """Advance x forward nt steps: y = Phi x (A is ignored, shell API)."""
        x.copy(y)
        for it in range(self.nt):
            self.B.mult(y,self.tmp)
            if self.CN:# and it>0:
                # explicit CN half-step, projected onto the velocity space
                self.L.mult(y,self.tmp2)
                self.Pu.multTranspose(self.tmp2,self.tmpr)
                self.Pu.mult(self.tmpr,self.tmp2)
                self.tmp.axpy(self.dt/2.,self.tmp2)
            self.ksp.solve(self.tmp,y)
    def multTranspose(self,A,x,y):
        """Apply the adjoint propagator: y = Phi^H x (steps in reverse order).
        Conjugation around solveTranspose/multTranspose yields the
        Hermitian transpose for complex scalars."""
        x.copy(y)
        for it in range(self.nt):
            y.conjugate()
            self.ksp.solveTranspose(y,self.tmp)
            self.tmp.conjugate()
            self.B.multTranspose(self.tmp,y)
            if self.CN:# and it<(self.nt-1):
                self.Pu.multTranspose(self.tmp,self.tmpr)
                self.Pu.mult(self.tmpr,self.tmp)
                self.tmp.conjugate()
                self.L.multTranspose(self.tmp,self.tmp2)
                self.tmp2.conjugate()
                y.axpy(self.dt/2.,self.tmp2)
def OptimalPerturbationsSLEPc(TG,nev):
    """
    Compute the 'nev' leading eigenpairs of the (shell) operator TG with
    SLEPc (typically TG is an OptimalPerturbations shell matrix, whose
    eigenvalues are the optimal energy gains).
    outputs: (eigvals, modes) gathered on proc 0, None on other procs
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    ev   = TG.getVecRight()
    ndof = ev.getSize()
    # Setup EPS
    Print('  - Setting the EPS')
    S = SLEPc.EPS();
    S.create(comm)
    S.setOperators(TG)
    S.setDimensions(nev = nev,ncv = 6)
    S.setTolerances(tol=1e-6, max_it=100)
    S.setFromOptions()
    # Solve the EVP
    Print("  - Solving the EPS")
    S.solve()
    its   = S.getIterationNumber()
    nconv = S.getConverged()
    # results are gathered on rank 0 only
    if rank == 0:
        eigvals=zeros(nconv,'complex')
        modes  =zeros([ndof,nconv],'complex')
    else:
        eigvals = None
        modes   = None
    for i in range(nconv):
        eigval = S.getEigenpair(i, ev)
        v = Vec2DOF(ev)
        if rank == 0:
            eigvals[i] = eigval
            modes[:,i]= v
    return eigvals,modes
class OptimalForcings(object):
    """
    Shell matrix for optimal (harmonic) forcing computations with PETSc.
    Its action on a reduced forcing vector f is
        y = Qr^-1 Pu^T B2^H OP^-H Q OP^-1 B2 Pu f,   OP = i*omega*B + L
    so the leading eigenpairs give the optimal gains and forcings.
    """
    def __init__(self,L,B,B2,Pu,Q,Qr,omega):
        """
        Parameters of the optimal forcings computation:
        L,  PETSC Mat, discretization matrix
        B,  PETSC Mat, mass matrix
        B2, PETSC Mat, 'mass' matrix corresponding to the forcing. This
            can be used e.g. to restrict the forcing region
        Q,  PETSC Mat, norm matrix
        Pu, PETSC Mat, converting velocity only vectors to full DOFs vector
        Qr, PETSC Mat, norm matrix on the velocity-only space
            (should be invertible)
            Qr = Pu^T Q Pu
        omega, real, frequency
        The object contains two KSP solvers, one for resolvent computation
        and one for the inversion of the norm matrix, that are initialized
        when the object is initialized
        """
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        self.L  = L
        self.B  = B
        self.B2 = B2
        self.Pu = Pu
        self.Q  = Q
        self.Qr = Qr
        Print('  - Setting the linear solver')
        # Frequency-domain operator OP = i*omega*B + L
        self.OP = B.duplicate(copy=True)
        self.OP.scale(1j*omega)
        self.OP.axpy(1.,L)
        # Direct (LU) solver for OP^-1; options prefix 'OP_'
        self.ksp = PETSc.KSP()
        self.ksp.create(comm)
        self.ksp.setOptionsPrefix('OP_')
        self.ksp.setType('preonly')
        self.ksp.getPC().setType('lu')
        self.ksp.setOperators(self.OP)
        self.ksp.setFromOptions()
        self.ksp.getPC().setUp()
#        self.ksp.view()
        # Iterative (CG/ILU) solver for Qr^-1; options prefix 'Q_'
        # BUGFIX: the 'Q_' prefix was previously set on self.ksp (already
        # configured with the 'OP_' prefix) instead of self.ksp2, so
        # 'Q_'-prefixed run-time options never reached this solver.
        self.ksp2 = PETSc.KSP()
        self.ksp2.create(comm)
        self.ksp2.setOptionsPrefix('Q_')
        self.ksp2.setType('cg')
        self.ksp2.getPC().setType('ilu')
        self.ksp2.setOperators(self.Qr)
        self.ksp2.setFromOptions()
        self.ksp2.getPC().setUp()
#        self.ksp2.view()
        # work vectors
        self.tmp  = L.getVecRight()
        self.tmp2 = L.getVecRight()
        self.tmpr = Qr.getVecRight()
    def mult(self,A,x,y):
        """Shell action y = Qr^-1 Pu^T B2^H OP^-H Q OP^-1 B2 Pu x
        (A is ignored; required by the PETSc shell-matrix API)."""
        self.Pu.mult(x,self.tmp2)              # expand reduced forcing to full DOFs
        self.B2.mult(self.tmp2,self.tmp)       # forcing mass matrix
        self.ksp.solve(self.tmp,self.tmp2)     # resolvent solve OP^-1
        self.Q.mult(self.tmp2,self.tmp)        # energy norm
        # conjugation around solveTranspose gives the Hermitian solve OP^-H
        self.tmp.conjugate()
        self.ksp.solveTranspose(self.tmp,self.tmp2)
        self.tmp2.conjugate()
        self.B2.multTranspose(self.tmp2,self.tmp)
        self.Pu.multTranspose(self.tmp,self.tmpr)   # restrict to velocity space
        self.ksp2.solve(self.tmpr,y)                # apply Qr^-1
def OptimalForcingsSLEPc(FR,shell,nev):
    """
    Compute optimal forcings: leading eigenpairs of the shell operator FR
    (see OptimalForcings), together with the flow responses reconstructed
    through the resolvent.
    inputs : FR,    PETSc shell Mat wrapping 'shell'
             shell, OptimalForcings context (provides Pu, B2 and the KSPs)
             nev,   number of requested eigenvalues
    outputs: (eigvals, fs, qs) gathered on proc 0, None on other procs
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    f      = FR.getVecRight()
    q      = shell.L.getVecRight()
    tmp    = shell.L.getVecRight()
    ndof_f = f.getSize()
    ndof_q = q.getSize()
    # Setup EPS
    Print('  - Setting the EPS')
    S = SLEPc.EPS();
    S.create(comm)
    S.setOperators(FR  )
    S.setDimensions(nev = nev,ncv = 10)
    S.setTolerances(tol=1e-6, max_it=100)
    S.setFromOptions()
    # Solve the EVP
    Print("  - Solving the EPS")
    S.solve()
    its   = S.getIterationNumber()
    nconv = S.getConverged()
    if rank == 0:
        eigvals = zeros(nconv,'complex')
        fs      = zeros([ndof_f,nconv],'complex')
        qs      = zeros([ndof_q,nconv],'complex')
    else:
        eigvals = None
        fs      = None
        qs      = None
    for i in range(nconv):
        eigval = S.getEigenpair(i, f)
        # reconstruct the flow response for this forcing: q = -OP^-1 B2 Pu f
        shell.Pu.mult(f,q)
        shell.B2.mult(q,tmp)
        shell.ksp.solve(tmp,q)
        # NOTE(review): sign flip -- presumably a sign convention of the
        # underlying formulation; confirm against the problem definition
        q.scale(-1.0)
        vf = Vec2DOF(f)
        vq = Vec2DOF(q)
        if rank == 0:
            eigvals[i] = eigval
            fs[:,i] = vf
            qs[:,i] = vq
    return eigvals,fs,qs
| Python |
# initialize PETSC & SLEPC
import sys, petsc4py,slepc4py
slepc4py.init(sys.argv)
# load freefem tools
import freefem as ff
# load libraries
from numpy import *
import matplotlib.pyplot as plt
import h5py as h5
from petsc4py import PETSc
from slepc4py import SLEPc
from mpi4py import MPI
# load functions for stability analysis
import parfemstab as pfs
from parfemstab import Print,PrintRed,PrintGreen
# Set MUMPS as the linear solver
# Test script: loads a FreeFem++ discretization, round-trips it through
# HDF5, then sweeps shift-invert eigensolves along the imaginary axis.
# The 'st_' prefix targets the SLEPc spectral-transform inner solver.
opts = PETSc.Options()
opts.setValue('st_ksp_type','preonly')
opts.setValue('st_pc_type','lu')
opts.setValue('st_pc_factor_mat_solver_package','mumps')
# Parallel info
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Get the directory where ff++ data is (from the -dir command-line option)
di = opts.getString('dir')
PrintRed('Running tests in '+ di + '\n')
PrintRed("Testing the standand interface for B q' = L q ... \n")
# Build FreeFEMdisc from .dat files (only rank 0 holds the sequential data)
Print('Loading dat files ... ')
if rank == 0:
    ffdisc = ff.FreeFEMdisc(di+'/lin/')
else:
    ffdisc = ff.EmptyFreeFEMdisc()
PrintGreen('done \n')
# Save as HDF5
Print('Saving as .h5 file ... ')
if rank == 0:
    ffdisc.SaveHDF5(di+'/ffdata.h5')
PrintGreen('done \n')
del ffdisc
# Loading from .h5 file (round-trip check of the HDF5 IO)
Print('Loading from .h5 file ... ')
if rank == 0:
    ffdisc = ff.FreeFEMdisc(di+'/ffdata.h5')
else:
    ffdisc = ff.EmptyFreeFEMdisc()
PrintGreen('done \n')
# Create PETSC matrices
Print('Convert matrices to PETSC parallel format ... ')
Lmat = pfs.CSR2Mat(ffdisc.L)
Bmat = pfs.CSR2Mat(ffdisc.B)
PrintGreen('done \n')
# Clear some space in memory
del ffdisc.L,ffdisc.B
# Compute modes using SLEPc for several shifts along the imaginary axis
Print('Compute eigenmodes ... ')
iomega = linspace(0,2.,5)
if rank == 0:
    plt.figure()
    f = open(di+'/spectrum.dat','w')
for omega0 in iomega:
    # shift = -i*omega0, consistent with the lambda = -i*omega convention
    shift = -1j*omega0
    Print('  shift : (%+10.4g,%+10.4g) '%(shift.real,shift.imag))
    nev = 20
    t1 = MPI.Wtime()
    omegas,modes,residuals = pfs.DirectModeSLEPc(Lmat,Bmat,shift,nev)
    t2 = MPI.Wtime()
    Print('   CPU time : %10.4g '%(t2-t1))
    # accumulate the spectrum on rank 0 (plot + text file)
    if rank == 0:
        plt.scatter(omegas.real,omegas.imag)
        for om in omegas:
            f.write('%+10.4g %+10.4g\n'%(om.real,om.imag))
PrintGreen('done \n')
if rank == 0:
    plt.show()
    f.close()
| Python |
import os
from numpy import *
try:
    import readmat
except ImportError:
    # Compiled Fortran helper not found: build it on the fly with f2py
    # from readmat.f90 (requires gfortran), then retry the import.
    os.system('f2py -m readmat -c readmat.f90 --fcompiler=gnu95')
    import readmat
import scipy.sparse as sp
import matplotlib.pyplot as plt
import matplotlib.tri as tri
class FreeFEMdisc():
"""
object that contains all information about the FreeFem++
discretization
GENERAL INFORMATION
    ndof       : integer, number of degrees of freedom
ntot : integer, total number of discretization elements
(including those that are 0 due to BCs)
newind : integer array (ntot), contain the new index of a
point when only DOFs are kept, or -1 if a dirichlet
BC is applied at that point
    nvar       : integer, number of variables (i.e. ux, uy, p,...)
n : integer array (nvar): number of DOFs in each variable
n0 : integer array (nvar): number of elements in each variable
np1 : integer, number of elements on the P1 mesh
np2 : integer, number of elements on the P2 mesh
idof [ivar] : integer array (ndof), indicating which DOFs correspond
to variable 'ivar'
idofi [ivar] : integer array (ntot), indicating which element correspond
to variable 'ivar'
itot [ivar] : integer array (np1 or np2), indicating the correspondance
between elements and DOFs of variable ivar
vartype [ivar] : string array, indicating if the field is discretized
P1 or P2 elements
varorder[ivar] : order or the elements of variable 'ivar' relative to the
corresponding mesh
MESHES
meshp1 : matplotlib.tri.Triangulation, P1 mesh
meshp2 : matplotlib.tri.Triangulation, P2 mesh
    MATRICES (containing only DOFs)
L : real or complex scipy.sparse CSR matrix,
B : real or complex scipy.sparse CSR matrix, mass matrix
Q : real or complex scipy.sparse CSR matrix, inner product
matrix
q0 : real array, base flow state vector
"""
def __init__(self,di,dibase = None):
"""
Initilize the object using either
- the text files written by FreeFem++ in folder 'di' (the
base flow data should be in 'di'/../base/ unless given
in dibase)
- an *.h5 file obtained using function SaveHDF5
"""
if di[-3:] == '.h5':
self.LoadHDF5(di)
else:
if dibase == None:
dibase = di + '../base/'
self.LoadDatFiles(di,dibase)
    def LoadDatFiles(self,di,dibase):
        """
        Build the discretization from the FreeFem++ text files in 'di'
        (BC.dat, LNS.dat, B.dat, Q.dat, dofs.dat, base.dat) and the mesh
        files in 'dibase'. Populates all attributes described in the
        class docstring.
        """
        ls = os.listdir(di)
        # Find out which components of the state vector correspond to Dirichlet BCs
        # (BCmat injects free DOFs into the full element vector)
        if 'BC.dat' in ls:
            BCmat,self.newind = self.LoadBC(di+'/BC.dat')
            self.ndof = len(nonzero(self.newind != -1)[0])
        else:
            raise IOError("Cannot find BC.dat in "+repr(di))
        # Read Matrices, restricted to the free DOFs: BCmat^T * M * BCmat
        if 'LNS.dat' in ls:
            mat = self.LoadMat(di+'/LNS.dat')
            self.L = BCmat.transpose() * mat * BCmat
        else:
            raise IOError("Cannot find LNS.dat in "+repr(di))
        if 'B.dat' in ls:
            mat = self.LoadMat(di+'/B.dat')
            self.B = BCmat.transpose() * mat * BCmat
        else:
            raise IOError("Cannot find B.dat in "+repr(di))
        if 'Q.dat' in ls:
            mat = self.LoadMat(di+'/Q.dat')
            self.Q = BCmat.transpose() * mat * BCmat
        else:
            raise IOError("Cannot find Q.dat in "+repr(di))
        # Find the number of variables
        # (dofs.dat: one row per element -- variable index, x, y)
        print ''
        tmp = loadtxt(di+'/dofs.dat')
        nvar = int(tmp[:,0].max() + 1)
        print 'Number of variables : ',nvar
        self.nvar = nvar
        self.ntot = len(tmp[:,0])
        self.LoadVars(tmp)
        # qdof[idof[i][:]]  => array of dofs corresponding to field i = qidof
        # qtot[itot[i][:]]  => array of elements corresponding to field i = qitot
        # qidof = qitot[idofi[i][:]]
        # Meshes
        try:
            meshtri1,meshpts1 = self.LoadMesh(dibase+'/connectivity.dat',dibase+'/coordinates.dat')
        except IOError:
            raise IOError('Cannot find '+ dibase+'/connectivity.dat and '+dibase+'/coordinates.dat')
        try:
            meshtri2,meshpts2 = self.LoadMesh(dibase+'/connectivity-2.dat',dibase+'/coordinates-2.dat')
        except IOError:
            raise IOError('Cannot find '+ dibase+'/connectivity-2.dat and '+dibase+'/coordinates-2.dat')
        self.np1 = len(meshpts1[:,0])
        self.np2 = len(meshpts2[:,0])
        # classify each variable as P1 or P2 by matching its element count
        # against the mesh sizes
        self.vartype=[]
        for i in range(self.nvar):
            if self.n0[i] == self.np1:
                self.vartype.append('p1')
                print '  Variable # %2d : %6d ndof. Type: %s'%(i,self.n[i],'p1')
            elif self.n0[i] == self.np2:
                self.vartype.append('p2')
                print '  Variable # %2d : %6d ndof. Type: %s'%(i,self.n[i],'p2')
            else:
                print self.n0[i], self.np1,self.np2,self.n[i]
                raise ValueError('Neither P1 nor P2')
        # structured (x,y) arrays of the mesh points, for lexicographic sorting
        self.meshp1 = tri.Triangulation(meshpts1[:,0],meshpts1[:,1],meshtri1)
        xyp1 = []
        for i in range(self.np1):
            xyp1.append((meshpts1[i,0],meshpts1[i,1]))
        xyp1 = array(xyp1,dtype=[('x', 'float'), ('y', 'float')])
        self.meshp2 = tri.Triangulation(meshpts2[:,0],meshpts2[:,1],meshtri2)
        xyp2 = []
        for i in range(self.np2):
            xyp2.append((meshpts2[i,0],meshpts2[i,1]))
        xyp2 = array(xyp2,dtype=[('x', 'float'), ('y', 'float')])
        # Associate DOFs with mesh points by matching the lexicographic
        # (x,y) orderings of DOF coordinates and mesh coordinates
        self.varorder=[]
        for i in range(self.nvar):
            indi = argsort(self.xydof[i],order=['x','y'])
            if self.vartype[i] == 'p1':
                indm = argsort(xyp1,order=['x','y'])
            elif self.vartype[i] == 'p2':
                indm = argsort(xyp2,order=['x','y'])
            ii   = argsort(indi)
            iii  = ii[indm]
            iiii = argsort(indi[iii])
            self.varorder.append(indi[iiii])
        # Load base flow
        self.q0 = loadtxt(di+'/base.dat')
def Save(self,name):
"""
Save the FreeFEMdisc object using the pickle module in file 'name'
It can then be re-loaded using pickle.load
This is less efficient than using the HDF5 IO
"""
import pickle
f = open(name,'w')
pickle.dump(self,f)
f.close()
    def SaveHDF5(self,fname):
        """
        Save the FreeFEMdisc object using HDF5 in file 'fname'.
        It can be loaded when initializing an object
        """
        # thin wrapper: the actual serialization lives in SaveHDF5_base
        self.SaveHDF5_base(fname)
def SaveHDF5_base(self,fname):
"""
Save the FreeFEMdisc object using HDF5 in file 'fname'.
It can be loaded when initializing an object
"""
def savemath5(f,mat,gname):
grp = f.create_group(gname)
mat = mat.tocsr()
dset = grp.create_dataset('shape' ,data=mat.shape )
dset = grp.create_dataset('indices' ,data=mat.indices )
dset = grp.create_dataset('indptr' ,data=mat.indptr )
dset = grp.create_dataset('data' ,data=mat.data )
def savemesth5(f,msh,gname):
grp = f.create_group(gname)
dset = grp.create_dataset('x' ,data=msh.x )
dset = grp.create_dataset('y' ,data=msh.y )
dset = grp.create_dataset('triangles' ,data=msh.triangles )
import h5py as h5
os.system('rm -f '+fname)
file=h5.File(fname)
# save general information
grpgen = file.create_group('general')
dset = grpgen.create_dataset('ndof' ,data=self.ndof )
dset = grpgen.create_dataset('ntot' ,data=self.ntot )
dset = grpgen.create_dataset('newind' ,data=self.newind )
dset = grpgen.create_dataset('nvar' ,data=self.nvar )
dset = grpgen.create_dataset('n' ,data=self.n )
dset = grpgen.create_dataset('n0' ,data=self.n0 )
dset = grpgen.create_dataset('np1' ,data=self.np1 )
dset = grpgen.create_dataset('np2' ,data=self.np2 )
# save dof information
grpdof = file.create_group('dof')
for ivar in range(self.nvar):
grp = grpdof.create_group('dof_%d'%ivar)
dset = grp.create_dataset('idof' ,data=self.idof [ivar])
dset = grp.create_dataset('idofi' ,data=self.idofi [ivar])
dset = grp.create_dataset('itot' ,data=self.itot [ivar])
dset = grp.create_dataset('vartype' ,data=self.vartype [ivar])
dset = grp.create_dataset('varorder' ,data=self.varorder[ivar])
dset = grp.create_dataset('xydof' ,data=self.xydof [ivar])
# save meshes
savemesth5(file,self.meshp1,'meshp1')
savemesth5(file,self.meshp2,'meshp2')
# save matrices
savemath5(file,self.L ,'L')
savemath5(file,self.B ,'B')
savemath5(file,self.Q ,'Q')
# save base flow
grpbase = file.create_group('base')
dset = grpbase.create_dataset('q0' ,data=self.q0)
file.close()
    def LoadHDF5(self,fname):
        """
        Restore all attributes from an HDF5 file written by SaveHDF5.
        NOTE(review): uses the legacy h5py Dataset '.value' accessor,
        which requires h5py < 3 (replaced by ds[()] in newer versions).
        """
        def loadmath5(f,gname):
            # inverse of savemath5: rebuild a scipy CSR matrix
            shape   = f[gname+'/shape'  ].value
            indices = f[gname+'/indices'].value
            indptr  = f[gname+'/indptr' ].value
            data    = f[gname+'/data'   ].value
            return sp.csr_matrix((data, indices, indptr), shape=(shape[0], shape[1]))
        def loadmesth5(f,gname):
            # inverse of savemesth5: rebuild a matplotlib Triangulation
            x         = f[gname+'/x'        ].value
            y         = f[gname+'/y'        ].value
            triangles = f[gname+'/triangles'].value
            return tri.Triangulation(x,y,triangles)
        import h5py as h5
        file=h5.File(fname,'r')
        # load general information
        self.ndof   = file['general/ndof'  ].value
        self.ntot   = file['general/ntot'  ].value
        self.newind = file['general/newind'].value
        self.nvar   = file['general/nvar'  ].value
        self.n      = file['general/n'     ].value
        self.n0     = file['general/n0'    ].value
        self.np1    = file['general/np1'   ].value
        self.np2    = file['general/np2'   ].value
        # load dof information (one sub-group per variable)
        self.idof     = []
        self.idofi    = []
        self.itot     = []
        self.vartype  = []
        self.varorder = []
        self.xydof    = []
        for ivar in range(self.nvar):
            self.idof     .append(file['dof/dof_%d/idof'    %ivar].value)
            self.idofi    .append(file['dof/dof_%d/idofi'   %ivar].value)
            self.itot     .append(file['dof/dof_%d/itot'    %ivar].value)
            self.vartype  .append(file['dof/dof_%d/vartype' %ivar].value)
            self.varorder .append(file['dof/dof_%d/varorder'%ivar].value)
            self.xydof    .append(file['dof/dof_%d/xydof'   %ivar].value)
        # load meshes
        self.meshp1 = loadmesth5(file,'meshp1')
        self.meshp2 = loadmesth5(file,'meshp2')
        # load matrices
        self.L = loadmath5(file,'L')
        self.B = loadmath5(file,'B')
        self.Q = loadmath5(file,'Q')
        # load base flow
        self.q0 = file['base/q0'].value
        file.close()
    def LoadMat(self,name):
        """
        Read a FreeFem++ matrix dump 'name' (COO text format with a header)
        and return it as a scipy sparse matrix in CSC format. Real/complex
        is auto-detected from the first data line; the Fortran helper
        'readmat' does the bulk parsing.
        """
        print 'Reading file',name
        f = open(name, 'r')
        # NOTE(review): readlines(5) relies on the buffered read returning
        # at least 5 lines (rr[3] and rr[4] are used below) -- fragile
        rr = f.readlines(5)
        f.close()
        # Read the matrix size from header line 4: n m <sym> nnz
        str=rr[3]
        w=str.split(' ')
        #remove blanks
        data=[]
        for i in range(size(w)):
            if len(w[i]) != 0:
                data.append(w[i])
        n  =int(data[0])
        m  =int(data[1])
        nnz=int(data[3])
        print "  Matrix size:",n,'*',m,', nnz',nnz
        # Determine if the matrix is real or complex: a complex entry is
        # written as "(re,im)", so the value token splits on ','
        str=rr[4]
        w=str.split(' ')
        data=[]
        for j in range(size(w)):
            if len(w[j]) != 0:
                data.append(w[j])
        tmp = data[2].split(',')
        if len(tmp) == 2:
            print '  Type : complex'
            icoo,jcoo,dcoo = readmat.readcomplexmat(name,nnz)
        else:
            print '  Type : real'
            icoo,jcoo,dcoo = readmat.readrealmat(name,nnz)
        # FreeFem++ indices are 1-based
        icoo = icoo - 1
        jcoo = jcoo - 1
        ijcoo = [icoo,jcoo]
        # Create COO matrix
        mat=sp.coo_matrix((dcoo,ijcoo),shape=(n,m))
        del dcoo,ijcoo,icoo,jcoo
        # Convert to compressed (CSC) format
        mat=mat.tocsc()
        return mat
def LoadBC(self,name):
tmp=loadtxt(name)
n=size(tmp)
ind=0
ival=[]
jval=[]
dval=[]
new_ind=zeros(n,'int')
for i in range(n):
if abs(tmp[i]) < 1e-10:
ival.append(i)
jval.append(ind)
dval.append(1.)
new_ind[i]=ind
ind+=1
else:
new_ind[i]=-1
dcoo=array(dval,'complex')
ijcoo=[array(ival,'int'),array(jval,'int')]
# Create COO matrix
mat=sp.coo_matrix((dcoo,ijcoo),shape=(n,ind))
# Convert to CSR format
mat=mat.tocsc()
return mat,new_ind
    def LoadVars(self,tmp):
        """
        Build the per-variable index bookkeeping from the dofs.dat table
        'tmp' (columns: variable index, x, y; one row per element).
        Populates self.idof, self.itot, self.idofi, self.xydof, self.n,
        self.n0 (see the class docstring for their meaning).
        """
        idof  = []
        itot  = []
        idofi = []
        ind   = []
        xydof = []
        # one accumulator list per variable
        for i in range(self.nvar):
            idof .append([])
            itot .append([])
            idofi.append([])
            xydof.append([])
            ind  .append(0)
        # Fill the lists: walk every element once, keeping only the
        # non-Dirichlet ones (newind != -1)
        for i in range(self.ntot):
            ii = int(tmp[i,0])
            if self.newind[i]!=-1:
                idof[ii].append(self.newind[i])
                itot[ii].append(ind[ii])
            idofi[ii].append(i)
            ind[ii]+=1
            xydof[ii].append((tmp[i,1],tmp[i,2]))
        # Convert lists to arrays ((x,y) as a structured dtype so that
        # argsort(order=['x','y']) works later)
        self.n  = []
        self.n0 = []
        for i in range(self.nvar):
            idof [i] = array(idof [i],'int')
            itot [i] = array(itot [i],'int')
            idofi[i] = array(idofi[i],'int')
            xydof[i] = array(xydof[i],dtype=[('x', 'float'), ('y', 'float')])
            self.n.append (len(idof [i]))
            self.n0.append(len(idofi[i]))
        self.n  = array(self.n)
        self.n0 = array(self.n0)
        # sanity check: free DOFs of all variables must add up to ndof
        assert (self.ndof == sum(self.n))
        self.idof  = idof
        self.itot  = itot
        self.idofi = idofi
        self.xydof = xydof
def LoadMesh(self,triname,ptsname):
triangles = loadtxt(triname)
triangles -= 1
pts = loadtxt(ptsname)
return triangles,pts
def PlotVar(self,q,i,simcolor=True,ncontours = 20,contours = None,plot = True,returnc = False,fill = True,**kwargs):
"""
Creates a contour plot
- 'q' is a state vector than can be real or complex (in which
case teh real part is plotted) and can contain points where
Dirichlet boundary conditions are applied
- 'i' is the index of the field
- 'simcolor' if true, use a symmetric colorscale
- 'contours' if provided, the contours to be drawn
- 'ncontours' if provided, the number of contours to be drawn
- 'plot' if true, the data in plotted (useful to onle get the
field)
- **kwargs are arguments for the contourf function
returns a vector that contains the value of field 'i' at each
mesh point
"""
if self.vartype[i] == 'p1':
v = zeros(self.np1,dtype(q[0]))
else:
v = zeros(self.np2,dtype(q[0]))
if len(q) == self.ndof:
qui = q[self.idof[i]]
v[self.itot[i]] = qui
v = v[self.varorder[i]]
elif len(q) == self.ntot:
v = q[self.idofi[i]]
v = v[self.varorder[i]]
else:
raise ValueError("wrong size")
if plot:
if simcolor:
Mx = max(abs(v))
mx = -Mx
else:
Mx = max(v)
mx = min(v)
if contours == None:
contours = linspace(mx,Mx,ncontours)
if self.vartype[i] == 'p1':
if fill:
c = plt.tricontourf(self.meshp1,v.real,contours,**kwargs)
else:
c = plt.tricontour(self.meshp1,v.real,contours,**kwargs)
if self.vartype[i] == 'p2':
if fill:
c = plt.tricontourf(self.meshp2,v.real,contours,**kwargs)
else:
c = plt.tricontour(self.meshp2,v.real,contours,**kwargs)
if returnc:
return v,c
else:
return v
    def GetValue(self,q,i,xi,yi):
        """
        Get the value of a field at given points
         - 'q' is a state vector than can be real or complex (in which
           case the real part is plotted) and can contain points where
           Dirichlet boundary conditions are applied
         - 'i' is the index of the field
         - 'xi' and 'yi' are the coordinates of the points
        returns the values (cubic interpolation from the mesh points)
        """
        from scipy.interpolate import griddata
        # pick the mesh the field lives on and allocate with q's scalar type
        if self.vartype[i] == 'p1':
            v = zeros(self.np1,dtype(q[0]))
            x = self.meshp1.x
            y = self.meshp1.y
        else:
            v = zeros(self.np2,dtype(q[0]))
            x = self.meshp2.x
            y = self.meshp2.y
        # same scatter/reorder logic as in PlotVar
        if len(q) == self.ndof:
            qui = q[self.idof[i]]
            v[self.itot[i]] = qui
            v = v[self.varorder[i]]
        elif len(q) == self.ntot:
            v = q[self.idofi[i]]
            v = v[self.varorder[i]]
        else:
            raise ValueError("wrong size")
#        zi = griddata((x, y), v, (xi[None,:], yi[:,None]), method='cubic')
        zi = griddata((x, y), v, (xi, yi), method='cubic')
        return zi
def GetHmin(self):
edges = self.meshp1.edges
x = self.meshp1.x
y = self.meshp1.y
nedges,n = edges.shape
hmin = 1e3
for i in range(nedges):
h12 = (x[edges[i,0]]-x[edges[i,1]])**2 + \
(y[edges[i,0]]-y[edges[i,1]])**2
hmin = min(hmin,sqrt(h12))
return hmin
    def GetDt(self,cfl,iu):
        """
        Return an array (np2) of per-node time steps dt = cfl * hmin,
        where hmin is twice the length of the shortest P2 edge touching
        each node.
        - 'cfl' is the CFL number
        - 'iu' lists the velocity field indices used to build the local
          velocity magnitude
        NOTE(review): the division by the local velocity magnitude is
        commented out below, so 'utot' is computed but currently unused
        in the returned value - confirm this is intentional.
        """
        edges = self.meshp2.edges
        x = self.meshp2.x
        y = self.meshp2.y
        # Local velocity magnitude from the base flow components in 'iu'.
        utot = zeros(self.np2)
        for i in iu:
            tmp = self.q0[self.idofi[i]]**2
            utot += tmp[self.varorder[i]]
        utot = sqrt(utot)
        # For each node, track twice the shortest incident edge length.
        hmin = 1e3*ones(self.np2)
        nedges,n = edges.shape
        for i in range(nedges):
            p0 = edges[i,0]
            p1 = edges[i,1]
            h = sqrt((x[p0]-x[p1])**2 + (y[p0]-y[p1])**2)
            hmin[p0] = min(hmin[p0],2*h)
            hmin[p1] = min(hmin[p1],2*h)
        # Velocity scaling disabled (see NOTE above).
        utot = hmin*cfl#/utot
        # Debug output: global minimum mesh size.
        print hmin.min()
        return utot
def SaveFieldFF(self,q,fname):
for i in range(self.nvar):
f = open(fname+'_%d.dat'%i,'w')
if self.vartype[i] == 'p1':
v = zeros(self.np1,dtype(q[0]))
f.write("%e \n"%self.np1)
else:
v = zeros(self.np2,dtype(q[0]))
f.write("%e \n"%self.np2)
if len(q) == self.ndof:
qui = q[self.idof[i]]
v[self.itot[i]] = qui
elif len(q) == self.ntot:
v = q[self.idofi[i]]
else:
raise ValueError("wrong size")
for j in range(len(v)):
f.write("(%e,%e)\n"%(v[j].real,v[j].imag))
f.close()
def getPu(self,iu,mask = None):
"""
Computes a scipy sparse matrix (sequential) that takes as as input a
reduced vector defined only for some fields and perhaps for some spatial
region and returns a vector of DOFs where the other elements are set to
0
inputs : iu, integer array, containing the list of fields on which the
reduced vector is defined
mask, function of (x,y), true if the reduced vector should be
defined at (x,y). None if always true
outputs: Pu, scipy sparse matrix
TO DO: PETSC // version
"""
ival = []
jval = []
dval = []
nu = 0
for ivar in iu:
for i in range(self.n[ivar]):
x,y=self.xydof[ivar][self.itot[ivar][i]]
if mask == None or mask(x,y):
ival.append(self.idof[ivar][i])
jval.append(nu);
dval.append(1.)
nu += 1
dcoo = array(dval,'complex')
ijcoo = [array(ival,'int'),array(jval,'int')]
# Create COO matrix
Pu = sp.coo_matrix((dcoo,ijcoo),shape=(self.ndof,nu))
# Convert to CSR format
Pu = Pu.tocsc()
return Pu
class EmptyFreeFEMdisc():
    """
    Placeholder standing in for a FreeFEMdisc on processes that do not
    load the discretization: it only carries empty operator slots.
    """
    def __init__(self):
        # No operators are available; all slots start empty.
        self.L = self.B = self.Q = None
| Python |
from freefem import *
class FreeFEMdisc_bodyforce(FreeFEMdisc):
    """
    FreeFem++ discretization extended with a body-force mass matrix B2.
    GENERAL INFORMATION
    ndof            : integer, number of degrees of freedom
    ntot            : integer, total number of discretization elements
                      (including those that are 0 due to BCs)
    newind          : integer array (ntot), contains the new index of a
                      point when only DOFs are kept, or -1 if a Dirichlet
                      BC is applied at that point
    nvar            : integer, number of variables (i.e. ux, uy, p,...)
    n               : integer array (nvar): number of DOFs in each variable
    n0              : integer array (nvar): number of elements in each variable
    np1             : integer, number of elements on the P1 mesh
    np2             : integer, number of elements on the P2 mesh
    idof [ivar]     : integer array (ndof), indicating which DOFs correspond
                      to variable 'ivar'
    idofi [ivar]    : integer array (ntot), indicating which element correspond
                      to variable 'ivar'
    itot [ivar]     : integer array (np1 or np2), indicating the correspondance
                      between elements and DOFs of variable ivar
    vartype [ivar]  : string array, indicating if the field is discretized
                      P1 or P2 elements
    varorder[ivar]  : order of the elements of variable 'ivar' relative to the
                      corresponding mesh
    MESHES
    meshp1          : matplotlib.tri.Triangulation, P1 mesh
    meshp2          : matplotlib.tri.Triangulation, P2 mesh
    MATRICES (containing only DOFs)
    L               : real or complex scipy.sparse CSR matrix
    B               : real or complex scipy.sparse CSR matrix, mass matrix
    B2              : real or complex scipy.sparse CSR matrix, forcing mass
                      matrix
    Q               : real or complex scipy.sparse CSR matrix, inner product
                      matrix
    q0              : real array, base flow state vector
    """
    def __init__(self,di,dibase = None):
        """
        Initialize the object using either
        - the text files written by FreeFem++ in folder 'di' (the
          base flow data should be in 'di'/../base/ unless given
          in dibase)
        - an *.h5 file obtained using function SaveHDF5
        """
        if di[-3:] == '.h5':
            self.LoadHDF5(di)
            self.LoadHDF5_bodyforce(di)
        else:
            # 'is None' is the idiomatic identity test for the default.
            if dibase is None:
                dibase = di + '../base/'
            self.LoadDatFiles(di,dibase)
            self.LoadDatFiles_bodyforce(di,dibase)
    def LoadDatFiles_bodyforce(self,di,dibase):
        """
        Load the forcing mass matrix from 'di'/B2.dat and restrict it to
        the DOFs (Dirichlet rows/columns projected out by the BC matrix).
        Raises IOError if B2.dat is missing.
        """
        ls = os.listdir(di)
        BCmat,self.newind = self.LoadBC(di+'/BC.dat')
        if 'B2.dat' in ls:
            mat = self.LoadMat(di+'/B2.dat')
            # Project out the Dirichlet rows/columns.
            self.B2 = BCmat.transpose() * mat * BCmat
        else:
            raise IOError("Cannot find B2.dat in "+repr(di))
    def SaveHDF5(self,fname):
        """
        Save the FreeFEMdisc object using HDF5 in file 'fname'.
        It can be loaded when initializing an object.
        """
        self.SaveHDF5_base(fname)
        self.SaveHDF5_bodyforce(fname)
    def SaveHDF5_bodyforce(self,fname):
        """
        Append the body-force matrix B2 to the HDF5 file 'fname'
        (the base data must already have been written by SaveHDF5_base).
        """
        def savemath5(f,mat,gname):
            # Store a sparse matrix as its CSR components in group 'gname'.
            grp = f.create_group(gname)
            mat = mat.tocsr()
            dset = grp.create_dataset('shape'   ,data=mat.shape   )
            dset = grp.create_dataset('indices' ,data=mat.indices )
            dset = grp.create_dataset('indptr'  ,data=mat.indptr  )
            dset = grp.create_dataset('data'    ,data=mat.data    )
        import h5py as h5
        # NOTE(review): no explicit mode is given, so h5py's default is
        # used; the file is expected to exist already - confirm.
        file=h5.File(fname)
        # save matrices
        savemath5(file,self.B2,'B2')
        file.close()
    def LoadHDF5_bodyforce(self,fname):
        """
        Load the body-force matrix B2 from the HDF5 file 'fname'.
        """
        def loadmath5(f,gname):
            # Rebuild a CSR matrix from the components stored by savemath5.
            shape   = f[gname+'/shape'  ].value
            indices = f[gname+'/indices'].value
            indptr  = f[gname+'/indptr' ].value
            data    = f[gname+'/data'   ].value
            return sp.csr_matrix((data, indices, indptr), shape=(shape[0], shape[1]))
        import h5py as h5
        file=h5.File(fname,'r')
        self.B2 = loadmath5(file,'B2')
        file.close()
| Python |
# Test driver: loads a FreeFem++ boundary-forcing discretization, round-trips
# it through HDF5, converts the operators to PETSc, and computes optimal
# (boundary) forcings with SLEPc, plotting the results on rank 0.
# initialize PETSC & SLEPC
import sys, petsc4py,slepc4py
slepc4py.init(sys.argv)
# load freefem tools
import freefem_boundaryforce as ff_boundary
# load libraries
from numpy import *
import matplotlib.pyplot as plt
import h5py as h5
from petsc4py import PETSc
from slepc4py import SLEPc
from mpi4py import MPI
# load functions for stability analysis
import parfemstab as pfs
# Set MUMPS as the linear solver
opts = PETSc.Options()
opts.setValue('st_ksp_type','preonly')
opts.setValue('st_pc_type','lu')
opts.setValue('st_pc_factor_mat_solver_package','mumps')
# Parallel info & print
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
Print = PETSc.Sys.Print
# Print in color (ANSI escape sequences)
REDBOLD_   = "\033[1;31m"
RED_       = "\033[31m"
GREEN_     = "\033[32m"
CYAN_      = "\033[36m"
YELLOW_    = "\033[33m"
CLRFORMAT_ = "\033[0m"
def PrintRed(s):
    # Collective colored print (safe under MPI).
    Print(RED_,s,CLRFORMAT_)
def PrintGreen(s):
    Print(GREEN_,s,CLRFORMAT_)
# Get the directory where ff++ data is
di = opts.getString('dir')
PrintRed('Running tests in '+ di + '\n')
Print("Testing the interface for B q' = L q with boundary forcing... \n")
# Build FreeFEMdisc from .dat files (rank 0 only; other ranks get an
# empty placeholder object)
Print('Loading dat files ... ')
if rank == 0:
    ffdisc = ff_boundary.FreeFEMdisc_boundaryforce(di+'/lin/')
else:
    ffdisc = ff_boundary.EmptyFreeFEMdisc()
PrintGreen('done \n')
# Save as HDF5
Print('Saving as .h5 file ... ')
if rank == 0:
    ffdisc.SaveHDF5(di+'/ffdata.h5')
PrintGreen('done \n')
# Loading from .h5 file (round-trip check of the HDF5 I/O)
del ffdisc
Print('Loading from .h5 file ... ')
if rank == 0:
    ffdisc = ff_boundary.FreeFEMdisc_boundaryforce(di+'/ffdata.h5')
else:
    ffdisc = ff_boundary.EmptyFreeFEMdisc()
PrintGreen('done \n')
# Create PETSC matrices
Print('Convert matrices to PETSC parallel format ... ')
Lmat = pfs.CSR2Mat(ffdisc.L)
Bmat = pfs.CSR2Mat(ffdisc.B)
B2mat = pfs.CSR2Mat(ffdisc.B2)
Qmat = pfs.CSR2Mat(ffdisc.Q)
Pumat = pfs.CSR2Mat(ffdisc.Pf)
Qrmat = pfs.CSR2Mat(ffdisc.Qf)
PrintGreen('done \n')
# Clear some space in memory (the scipy copies are no longer needed)
del ffdisc.L,ffdisc.B,ffdisc.B2,ffdisc.Q,ffdisc.Pf,ffdisc.Qf
# Compute optimal forcings
Print('Compute optimal forcings using SLEPC ... ')
omegas = [0.5]
G = zeros(len(omegas)); idx = 0
for iomega in range(len(omegas)):
    omega = omegas[iomega]
    Print(' omega = %f'%omega)
    # Set up the shell matrix and compute the factorizations
    t1 = MPI.Wtime()
    shell = pfs.OptimalForcings(Lmat,Bmat,B2mat,Pumat,Qmat,Qrmat,omega)
    localsizes,globalsizes = Qrmat.getSizes()
    FR = PETSc.Mat().create(comm)
    FR.setSizes(globalsizes)
    FR.setType('python')
    FR.setPythonContext(shell)
    t2 = MPI.Wtime()
    Print(' CPU time to build FR object : %10.4g '%(t2-t1))
    # Compute optimal perturbations
    gains,fs,qs = pfs.OptimalForcingsSLEPc(FR,shell,1)
    G[idx] = sqrt(gains[0].real); idx +=1
    t1 = MPI.Wtime()
    Print(' CPU time to solve the EVP : %10.4g '%(t1-t2))
    # Plot example (rank 0 only): leading forcing f and response q for the
    # first two fields, with the boundary trace overlaid on sampled values.
    if rank == 0:
        f = fs[:,0]
        q = qs[:,0]
        plt.figure()
        plt.subplot(221)
        xx,ff = ffdisc.PlotBoundaryVar(f,0,ax='y',add=((0,0),(1,0)))
        yy = xx; xx = -10 + 0*yy
        bval = ffdisc.GetValue(q,0,xx,yy)
        plt.plot(bval,yy,'+')
        mx = abs(ff.real).max()
        plt.subplot(222)
        ffdisc.PlotVar(q,0)
        plt.colorbar()
        plt.subplot(223)
        xx,ff = ffdisc.PlotBoundaryVar(f,1,ax='y',add=((0,0),(1,0)))
        yy = xx; xx = -10 + 0*yy
        bval = ffdisc.GetValue(q,1,xx,yy)
        plt.plot(bval,yy,'+')
        plt.subplot(224)
        ffdisc.PlotVar(q,1)
        plt.colorbar()
        plt.show()
# NOTE(review): plt.show() is called again after the loop - presumably a
# leftover; harmless if all figures were already shown.
plt.show()
PrintGreen('done \n')
| Python |
# Post-processing driver: reads the frequency-response results stored in
# results.h5 and, for each stored frequency, prints omega/gain and plots the
# flow response and the (velocity-projected) forcing, with a gain check.
# load freefem tools
import freefem_bodyforce as ff_body
# load libraries
from numpy import *
import matplotlib.pyplot as plt
import h5py as h5
di = '../../step'
ffdisc = ff_body.FreeFEMdisc_bodyforce(di+'/ffdata.h5')
h5f = h5.File(di+"/results.h5","r")
# NOTE(review): getPu as defined in this codebase returns a single matrix,
# but two values are unpacked here - confirm which version of getPu this
# script is paired with.
Pu,nu = ffdisc.getPu([0,1])
idx = 1
while "freq_%05d"%idx in h5f:
    # Stored groups are freq_00001, freq_00002, ...
    f = h5f["freq_%05d/forcing"%idx].value
    q = h5f["freq_%05d/flow"   %idx].value
    # Expand the reduced forcing back to full DOFs.
    f2 = Pu*f
    grp = h5f["freq_%05d"%idx]
    w = grp.attrs['omega']
    G = grp.attrs['gain']
    print idx,w,G
    idx +=1
    plt.figure()
    plt.subplot(2,1,1)
    ffdisc.PlotVar(q,0)
    plt.subplot(2,1,2)
    ffdisc.PlotVar(f2,0)
    # Recompute the energy ratio ||q||^2/||f||^2 as a sanity check
    # against the stored gain.
    nq = vdot(q,ffdisc.Q*q)
    nf = vdot(f2,ffdisc.Q*f2)
    print nq / nf
plt.show()
| Python |
from numpy import *
import matplotlib.pyplot as plt
import scipy.sparse as sp
import scipy.sparse.linalg as splin
# import scipy.sparse.linalg.eigen.arpack as arpie
# import scipy.io as io
# import progressbar as pbar
# import matplotlib.tri as tri
# import freefem as ff
from numpy.linalg import norm
from scipy.sparse.linalg import eigs, ArpackNoConvergence
# Convergence tolerances for the eigenvalue (ev) and frequency-response
# (fr) ARPACK solves below.
tol_ev=1e-10
tol_fr=1e-12
def Pu(ffdisc,iu):
    """
    Build the (sequential) prolongation matrix from the reduced space of
    the fields listed in 'iu' to the full DOF space of 'ffdisc'.
    inputs : ffdisc, discretization object (uses .n, .idof, .ndof)
             iu, iterable of field indices to keep
    returns: (Pu, nu) where Pu is an (ndof x nu) scipy sparse CSC matrix
             with a single unit entry per column and nu is the size of
             the reduced space.
    """
    ival = []
    jval = []
    nu = 0
    for ivar in iu:
        # Each kept DOF of field 'ivar' gets one column with a unit entry;
        # extend() replaces the original element-by-element append loop.
        ival.extend(ffdisc.idof[ivar][i] for i in range(ffdisc.n[ivar]))
        jval.extend(range(nu, nu + ffdisc.n[ivar]))
        nu += ffdisc.n[ivar]
    # All entries are 1: Pu is a pure selection/scatter matrix.
    dcoo = ones(nu,'complex')
    ijcoo = [array(ival,'int'),array(jval,'int')]
    # Create COO matrix
    Pu = sp.coo_matrix((dcoo,ijcoo),shape=(ffdisc.ndof,nu))
    # Convert to CSC format
    Pu = Pu.tocsc()
    return Pu,nu
def FR(ffdisc,omega,Pu,nu,nev):
    """
    Compute the 'nev' leading frequency-response (resolvent) gains and
    the associated forcing/response pairs at frequency 'omega', with the
    forcing restricted to the subspace selected by 'Pu' (size nu).
    returns (sigma, f, q): singular-value-like gains sqrt(Re eig), the
    full-DOF forcings and the corresponding responses.
    Uses sparse LU factorizations of Q and (L + i*omega*B) and ARPACK on
    the self-adjoint gain operator; partially converged results are kept
    if ARPACK does not fully converge.
    """
    # Assemble operators: Q restricted to the forcing subspace.
    Q = Pu.transpose()*ffdisc.Q*Pu
    Q = Q.tocsc()
    print 'Build LU decomposition of Q'
    LUQ = splin.splu(Q, permc_spec=3)
    OP = ffdisc.L+1j*omega*ffdisc.B
    OP = OP.tocsc()
    print 'Build LU decomposition of (L+iwB)'
    LU = splin.splu(OP, permc_spec=3)
    print 'done'
    def op(x):
        # Apply the gain operator: prolong, force (B2), solve the direct
        # problem, weight by Q, solve the adjoint (trans='H'), restrict,
        # and precondition by Q^{-H} on the reduced space.
        y = Pu*x
        z = ffdisc.B2*y
        y = LU.solve(z)
        z = ffdisc.Q*y
        y = LU.solve(z,trans='H')
        z = ffdisc.B2.transpose()*y
        y = Pu.transpose()*z
        w = LUQ.solve(y,trans='H')
        return w
    SOP = splin.LinearOperator((nu,nu),matvec=op,dtype='complex')
    try:
        w,v = splin.eigs(SOP, k=nev, M=None, sigma=None, which='LM', ncv=20, maxiter=100, tol=tol_fr, return_eigenvectors=True)
    except ArpackNoConvergence,err:
        # Keep whatever eigenpairs did converge rather than failing.
        w = err.eigenvalues
        v = err.eigenvectors
        print 'not fully converged'
    nconv = size(w)
    # Gains are the square roots of the (real) eigenvalues.
    sigma = sqrt(w.real)
    f = zeros([ffdisc.ndof,nconv],'complex')
    q = zeros([ffdisc.ndof,nconv],'complex')
    for k in range(nconv):
        # Recover the full-DOF forcing and its flow response.
        f[:,k] = Pu*v[:,k]
        z = ffdisc.B2*f[:,k]
        q[:,k] = -LU.solve(z)
    return sigma,f,q
def DirectMode(ffdisc,shift,nev):
    """
    Compute 'nev' direct global modes of the generalized eigenproblem
    closest to the complex 'shift', via shift-invert ARPACK iterations
    on (L - shift*B)^{-1} B.
    returns (omega, modes): eigenvalues mapped back through
    omega = (1/mu + shift)/(-1j) (frequency convention implied by the
    -1j division - confirm against the assembled operators) and the
    corresponding eigenvectors (ndof x nconv).
    Partially converged results are kept if ARPACK does not converge.
    """
    OP=ffdisc.L-shift*ffdisc.B
    OP= OP.tocsc()
    print 'Build LU decomposition of (L-sB)'
    LU=splin.splu(OP, permc_spec=3)
    print 'done.'
    def op(x):
        # Shift-invert operator: (L - shift*B)^{-1} B x
        r=ffdisc.B*x
        z=LU.solve(r)
        return z
    print 'define SOP'
    SOP=splin.LinearOperator((ffdisc.ndof,ffdisc.ndof),matvec=op,dtype='complex')
    print 'done.'
    # Compute modes
    print 'Calling eigs'
    try:
        w,v=splin.eigs(SOP, k=nev, M=None, sigma=None, which='LM', v0=None, ncv=60, maxiter=100, tol=tol_ev)
        print 'done.'
    except ArpackNoConvergence,err:
        # Keep whatever eigenpairs did converge rather than failing.
        w = err.eigenvalues
        v = err.eigenvectors
        print 'not fully converged'
    nconv=size(w)
    omega=zeros(nconv,'complex')
    modes=zeros([ffdisc.ndof,nconv],'complex')
    for i in range(nconv):
        # Undo the shift-invert spectral transform.
        omega[i]=(1./w[i]+shift)/(-1j)
        modes[:,i]=v[:,i]
    return omega,modes
def TS(ffdisc,dt,tf,q0):
OP = ffdisc.B - dt*ffdisc.L
OP= OP.tocsc()
print 'Build LU decomposition of (B - L/dt)'
LU=splin.splu(OP, permc_spec=3)
print 'done.'
nt = floor(tf/dt) +1
dt = tf / nt
q1 = q0.copy()
for i in range(nt):
print i,'/',nt
t1 = -ffdisc.B*q1
q1 = LU.solve(t1)
return q1
def CSR2Mat(L):
    """
    Convert a scipy sparse matrix into a PETSc AIJ matrix.
    'L' is converted to CSR format first if necessary; its CSR arrays
    (indptr, indices, data) are then handed to PETSc unchanged.
    """
    from petsc4py import PETSc
    mat = L if L.format == 'csr' else L.tocsr()
    petsc_mat = PETSc.Mat()
    petsc_mat.createAIJ(mat.shape,
                        csr=(mat.indptr, mat.indices, mat.data))
    return petsc_mat
def DOF2Vec(v):
    """
    Copy the length-n array 'v' into a new sequential PETSc vector.
    """
    from petsc4py import PETSc
    vec = PETSc.Vec()
    vec.createSeq(len(v))
    vec.setArray(v)
    return vec
def Vec2DOF(x):
    """
    Return the local array backing the PETSc vector 'x'.
    """
    return x.getArray()
def DirectModeSLEPc(ffdisc,shift,nev):
    """
    Compute 'nev' direct global modes closest to 'shift' using SLEPc's
    EPS solver with a shift-and-invert spectral transform (sequential
    matrices built from the scipy operators).
    returns (omega, modes) with omega = eigval/(-1j) and modes the
    eigenvectors (ndof x nconv).
    """
    from petsc4py import PETSc
    from slepc4py import SLEPc
    # Operators: convert the scipy matrices to PETSc format.
    print 'Set operators'
    Lmat = CSR2Mat(ffdisc.L)
    Bmat = CSR2Mat(ffdisc.B)
    # Setup EPS: target the eigenvalues of largest magnitude around the
    # shift, using a shift-and-invert spectral transform.
    print 'Set solver'
    S = SLEPc.EPS();
    S.create()
    S.setTarget(shift)
    S.setWhichEigenpairs(SLEPc.EPS.Which.TARGET_MAGNITUDE)
    SI = SLEPc.ST().create()
    SI.setType(SLEPc.ST.Type.SINVERT)
    SI.setOperators(Lmat,Bmat)
    SI.setShift(shift)
    S.setST(SI)
    S.setDimensions(nev = nev,ncv = 60)
    S.setTolerances(tol=tol_ev, max_it=100)
    # Allow command-line options to override the settings above.
    S.setFromOptions()
    # Solve the EVP
    print 'Solving EVP'
    S.solve()
    its = S.getIterationNumber()
    nconv = S.getConverged()
    omega=zeros(nconv,'complex')
    modes=zeros([ffdisc.ndof,nconv],'complex')
    ev = Lmat.getVecRight()
    for i in range(nconv):
        # getEigenpair fills 'ev' with the eigenvector in place.
        eigval = S.getEigenpair(i, ev)
        v = Vec2DOF(ev)
        omega[i] = eigval/(-1j)
        modes[:,i]= v
    return omega,modes
| Python |
from freefem import *
class FreeFEMdisc_boundaryforce(FreeFEMdisc):
    """
    FreeFem++ discretization extended with boundary (inflow) forcing.
    GENERAL INFORMATION
    ndof            : integer, number of degrees of freedom
    ntot            : integer, total number of discretization elements
                      (including those that are 0 due to BCs)
    newind          : integer array (ntot), contains the new index of a
                      point when only DOFs are kept, or -1 if a Dirichlet
                      BC is applied at that point
    nvar            : integer, number of variables (i.e. ux, uy, p,...)
    n               : integer array (nvar): number of DOFs in each variable
    n0              : integer array (nvar): number of elements in each variable
    np1             : integer, number of elements on the P1 mesh
    np2             : integer, number of elements on the P2 mesh
    idof [ivar]     : integer array (ndof), indicating which DOFs correspond
                      to variable 'ivar'
    idofi [ivar]    : integer array (ntot), indicating which element correspond
                      to variable 'ivar'
    itot [ivar]     : integer array (np1 or np2), indicating the correspondance
                      between elements and DOFs of variable ivar
    vartype [ivar]  : string array, indicating if the field is discretized
                      P1 or P2 elements
    varorder[ivar]  : order of the elements of variable 'ivar' relative to the
                      corresponding mesh
    MESHES
    meshp1          : matplotlib.tri.Triangulation, P1 mesh
    meshp2          : matplotlib.tri.Triangulation, P2 mesh
    MATRICES (containing only DOFs)
    L               : real or complex scipy.sparse CSR matrix
    B               : real or complex scipy.sparse CSR matrix, mass matrix
    Q               : real or complex scipy.sparse CSR matrix, inner product
                      matrix
    q0              : real array, base flow state vector
    BOUNDARY FORCING
    bcforcing       : bool, indicating whether forcing is applied on the boundary
    if true, only FR studies can be performed and the following variables are set
    forcingind      : indices of the DOFs where forcing is applied
    B2              : projection from data on the inflow to dofs everywhere
    Qf              : norm of the forcing (1d integral)
    Pf              : projection onto the forcing space
    xf,rf           : positions associated with the forcing
    ivarf           : variable number for each forcing DOF
    """
    def __init__(self,di,dibase = None):
        """
        Initialize the object using either
        - the text files written by FreeFem++ in folder 'di' (the
          base flow data should be in 'di'/../base/ unless given
          in dibase)
        - an *.h5 file obtained using function SaveHDF5
        """
        if di[-3:] == '.h5':
            self.LoadHDF5(di)
            self.LoadHDF5_boundaryforce(di)
        else:
            # 'is None' is the idiomatic identity test for the default.
            if dibase is None:
                dibase = di + '../base/'
            self.LoadDatFiles(di,dibase)
            self.LoadDatFiles_boundaryforce(di,dibase)
    def LoadDatFiles_boundaryforce(self,di,dibase):
        """
        Load the boundary-forcing data from the FreeFem++ text files:
        forcing DOF indices, forcing norm Qf, projection Pf, the
        inflow-to-DOF matrix B2, and the (x,r,ivar) coordinates of each
        forcing DOF.  Raises IOError if BC_forcing.dat is missing.
        """
        BCmat,self.newind = self.LoadBC(di+'/BC.dat')
        ls = os.listdir(di)
        if 'BC_forcing.dat' in ls:
            self.forcingind = self.LoadForcingBC(di+'/BC_forcing.dat')
            # Renumber to DOF indices and drop Dirichlet (-1) entries.
            self.forcingind = self.newind[self.forcingind]
            nodir = nonzero(self.forcingind != -1)
            self.forcingind = self.forcingind[nodir]
            mat = self.LoadMat(di+'/Qf.dat')
            self.Qf = BCmat.transpose() * mat * BCmat
            self.Pf = self.GetPf(self.forcingind)
            # Restrict the forcing norm to the forcing subspace.
            self.Qf = self.Pf.transpose()*self.Qf*self.Pf
        else:
            raise IOError("Cannot find BC_forcing.dat in "+repr(di))
        self.B2 = BCmat.transpose() * BCmat
        # Turn the forcing rows of L into -identity and zero them in B,
        # so those DOFs become forced constraints driven through B2.
        self.SetBC(self.L,self.forcingind,-1.)
        self.SetBC(self.B,self.forcingind,0.)
        # Coordinates and variable index of each forcing DOF.
        tmp = loadtxt(di+'/dofs.dat')
        ikeep = nonzero(self.newind != -1)[0]
        x = tmp[:,1][ikeep]
        r = tmp[:,2][ikeep]
        ivar = tmp[:,0][ikeep]
        self.xf = x[self.forcingind]
        self.rf = r[self.forcingind]
        self.ivarf = ivar[self.forcingind]
    def SaveHDF5(self,fname):
        """
        Save the FreeFEMdisc object using HDF5 in file 'fname'.
        It can be loaded when initializing an object.
        """
        self.SaveHDF5_base(fname)
        self.SaveHDF5_boundaryforce(fname)
    def SaveHDF5_boundaryforce(self,fname):
        """
        Append the boundary-forcing data (forcingind, xf, rf, ivarf and
        the B2/Qf/Pf matrices) to the HDF5 file 'fname'; the 'dof' group
        must already exist (written by SaveHDF5_base).
        """
        def savemath5(f,mat,gname):
            # Store a sparse matrix as its CSR components in group 'gname'.
            grp = f.create_group(gname)
            mat = mat.tocsr()
            dset = grp.create_dataset('shape'   ,data=mat.shape   )
            dset = grp.create_dataset('indices' ,data=mat.indices )
            dset = grp.create_dataset('indptr'  ,data=mat.indptr  )
            dset = grp.create_dataset('data'    ,data=mat.data    )
        import h5py as h5
        file=h5.File(fname)
        grpdof = file['dof']
        dset = grpdof.create_dataset('forcingind' ,data=self.forcingind )
        dset = grpdof.create_dataset('xf'         ,data=self.xf         )
        dset = grpdof.create_dataset('rf'         ,data=self.rf         )
        dset = grpdof.create_dataset('ivarf'      ,data=self.ivarf      )
        savemath5(file,self.B2 ,'B2')
        savemath5(file,self.Qf ,'Qf')
        savemath5(file,self.Pf ,'Pf')
        file.close()
    def LoadHDF5_boundaryforce(self,fname):
        """
        Load the boundary-forcing data saved by SaveHDF5_boundaryforce.
        """
        def loadmath5(f,gname):
            # Rebuild a CSR matrix from the components stored by savemath5.
            shape   = f[gname+'/shape'  ].value
            indices = f[gname+'/indices'].value
            indptr  = f[gname+'/indptr' ].value
            data    = f[gname+'/data'   ].value
            return sp.csr_matrix((data, indices, indptr), shape=(shape[0], shape[1]))
        import h5py as h5
        file=h5.File(fname,'r')
        self.forcingind =file['dof/forcingind' ].value
        self.xf         =file['dof/xf'         ].value
        self.rf         =file['dof/rf'         ].value
        self.ivarf      =file['dof/ivarf'      ].value
        self.B2 = loadmath5(file,'B2')
        self.Qf = loadmath5(file,'Qf')
        self.Pf = loadmath5(file,'Pf')
        file.close()
    def LoadForcingBC(self,name):
        """
        Read the forcing-boundary indicator file 'name' and return the
        indices of the entries whose magnitude is numerically nonzero.
        """
        tmp = loadtxt(name)
        # Vectorized replacement of the original element-by-element loop;
        # also returns a proper integer index array when no entry matches.
        return nonzero(abs(tmp) > 1e-10)[0]
    def SetBC(self,mat,idx,val):
        """
        Overwrite the rows 'idx' of sparse matrix 'mat' in place: the
        diagonal entry of each row is set to 'val' and every other stored
        entry of the row to 0.
        NOTE(review): mat.tocsr() returns 'mat' itself only when it is
        already CSR; for other formats the edits would hit a temporary
        copy.  L and B are documented as CSR here - confirm.
        """
        mat = mat.tocsr()
        for i in idx:
            j1 = mat.indptr[i]
            j2 = mat.indptr[i+1]
            for j in range(j1,j2):
                if mat.indices[j] == i:
                    mat.data[j] = val
                else:
                    mat.data[j] = 0.
    def GetPf(self,idx):
        """
        Build the (ndof x nf) sparse CSC projection onto the forcing
        space: one unit entry per forcing DOF listed in 'idx'.
        """
        assert(len(idx) > 0)
        nf = len(idx)
        ival = idx
        jval = range(nf)
        dcoo = ones(nf,'complex')
        ijcoo = [array(ival,'int'),array(jval,'int')]
        # Create COO matrix
        Pf = sp.coo_matrix((dcoo,ijcoo),shape=(self.ndof,nf))
        # Convert to CSC format
        Pf = Pf.tocsc()
        return Pf
    def PlotBoundaryVar(self,f,ivar,ax,add = ((None,None),(None,None))):
        """
        Plot component 'ivar' of the boundary-forcing vector 'f' against
        the boundary coordinate.
        - 'ax' selects the abscissa: 'x' plots f(x); 'y' plots f against r
          with the axes swapped (boundary coordinate vertical)
        - 'add' = ((coord0,val0),(coord1,val1)) optionally prepends and/or
          appends explicit end points (e.g. homogeneous wall values)
        returns the plotted coordinates and values (xx,ff)
        """
        i0 = nonzero(self.ivarf == ivar)[0]
        # Original code assigned the coordinate twice; once is enough.
        if ax == 'x':
            xx = self.xf[i0]
        elif ax == 'y':
            xx = self.rf[i0]
        # Sort along the boundary coordinate.
        isort = xx.argsort()
        xx = xx[isort]
        bc0 = add[0]
        bc1 = add[1]
        if bc0[0] is not None:
            xx = append(bc0[0],xx)
        if bc1[0] is not None:
            xx = append(xx,bc1[0])
        ff = f[i0]
        ff = ff[isort]
        if bc0[0] is not None:
            ff = append(bc0[1],ff)
        if bc1[0] is not None:
            ff = append(ff,bc1[1])
        if ax == 'x':
            plt.plot(xx,ff)
        elif ax == 'y':
            plt.plot(ff,xx)
        return xx,ff
| Python |
# Test driver: loads a FreeFem++ discretization (preferring the cached .h5),
# converts the operators to PETSc, and computes optimal initial perturbations
# by time stepping with SLEPc, plotting the results on rank 0.
# initialize PETSC & SLEPC
import sys, petsc4py,slepc4py
slepc4py.init(sys.argv)
# load freefem tools
import freefem as ff
# load libraries
from numpy import *
import matplotlib.pyplot as plt
import h5py as h5
from petsc4py import PETSc
from slepc4py import SLEPc
from mpi4py import MPI
# load functions for stability analysis
import parfemstab as pfs
from parfemstab import Print,PrintRed,PrintGreen
# Set MUMPS as the linear solver
opts = PETSc.Options()
opts.setValue('ksp_type','preonly')
opts.setValue('pc_type','lu')
opts.setValue('pc_factor_mat_solver_package','mumps')
# Parallel info
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Get the directory where ff++ data is
di = opts.getString('dir')
PrintRed('Running tests in '+ di + '\n')
PrintRed("Testing time stepping routines ... \n")
# Build FreeFEMdisc from .dat files (rank 0 only; the cached .h5 file is
# used when present, and created otherwise)
Print('Loading discretization files ... ')
if rank == 0:
    try:
        ffdisc = ff.FreeFEMdisc(di+'/ffdata.h5')
        Print('data loaded from .h5 file ... ')
    except IOError:
        ffdisc = ff.FreeFEMdisc(di+'/lin/')
        ffdisc.SaveHDF5(di+'/ffdata.h5')
        Print('data loaded from .dat file ... ')
    # Get the projection operator on velocity DOFs
    Pu = ffdisc.getPu(iu=[0,1])
    Qr = Pu.transpose()*ffdisc.Q*Pu
else:
    ffdisc = ff.EmptyFreeFEMdisc()
    Pu = None
    Qr = None
PrintGreen('done \n')
# Create PETSC matrices
Print('Convert matrices to PETSC parallel format ... ')
Lmat = pfs.CSR2Mat(ffdisc.L)
Bmat = pfs.CSR2Mat(ffdisc.B)
Pumat = pfs.CSR2Mat(Pu)
Qmat = pfs.CSR2Mat(ffdisc.Q)
Qrmat = pfs.CSR2Mat(Qr)
PrintGreen('done \n')
# Clear some space in memory (the scipy copies are no longer needed)
del ffdisc.L,ffdisc.B
# Compute modes using SLEPc
Print('Perform time stepping ... \n')
# Set the time step and scheme (CFL-based dt, broadcast from rank 0)
cfl = opts.getReal('cfl',10)
cn = opts.getBool('cn',False)
if rank == 0:
    hmin = ffdisc.GetHmin()
else:
    hmin = 1.
hmin = comm.bcast(hmin ,root = 0)
dt = hmin * cfl
Print('Time stepping parameters ')
if cn:
    Print('scheme : CN')
else:
    Print('scheme : Euler')
Print('CFL : %g'%cfl)
Print('dt : %g'%dt)
localsizes,globalsizes = Lmat.getSizes()
# Set up the shell matrix and compute the factorizations
t1 = MPI.Wtime()
shell = pfs.OptimalPerturbations(Lmat,Bmat,Pumat,Qmat,Qrmat,dt,CN=cn)
localsizes,globalsizes = Qrmat.getSizes()
TG = PETSc.Mat().create(comm)
TG.setSizes(globalsizes)
TG.setType('python')
TG.setPythonContext(shell)
TG.setUp()
t2 = MPI.Wtime()
Print(' CPU time to build TG object : %10.4g '%(t2-t1))
t1 = MPI.Wtime()
# Horizons over which the transient-growth operator is optimized.
Tfs = [2]
iev = 0
for itf in range(len(Tfs)):
    shell.setTf(Tfs[itf])
    gains,optperts = pfs.OptimalPerturbationsSLEPc(TG,1)
    if rank == 0:
        nconv = len(gains)
        for i in range(nconv):
            Print(' gain : %g '%(sqrt(gains[i]).real))
            plt.figure()
            ffdisc.PlotVar(Pu*optperts[:,i],0)
            plt.show()
    t2 = MPI.Wtime()
    Print(' CPU time : %10.4g '%(t2-t1))
plt.show()
| Python |
#!/usr/bin/env python
# CGI script that emits a small JavaScript snippet letting the client
# compute the clock skew between itself and the server.
import time
t = time.time()
u = time.gmtime(t)
# NOTE(review): '%e' and '%T' are not portable strftime directives on
# every platform - confirm the target OS supports them.
s = time.strftime('%a, %e %b %Y %T GMT', u)
print 'Content-Type: text/javascript'
print 'Cache-Control: no-cache'
# Date and Expires both set to "now" so the response is never cached.
print 'Date: ' + s
print 'Expires: ' + s
print ''
# The client subtracts the server time (milliseconds) from its own clock.
print 'var timeskew = new Date().getTime() - ' + str(t*1000) + ';'
| Python |
#!/usr/bin/python
# Generate C tables from the Adobe Glyph List (glyphlist.txt): parse
# "name;hex [hex ...]" records, then build name->unicode and
# unicode->names maps for the table dump below.
import sys
agl = []
agltab = []
aglmap = {}
# Echo the glyphlist header comments inside a C comment block.
print "/*"
f = open("glyphlist.txt", "r")
for line in f.readlines():
	if line[0] == '#':
		print line.strip()
		continue
	line = line[:-1]
	# Each record is "name;XXXX [XXXX ...]" with hex unicode values.
	name, list = line.split(';')
	list = map(lambda x: int(x, 16), list.split(' '))
	agl.append((name, list))
for name, ucslist in agl:
	num = len(ucslist)
	# Only the first unicode value is used for the forward table.
	ucs = ucslist[0]
	agltab.append((name, ucs))
	if ucs not in aglmap:
		aglmap[ucs] = []
	aglmap[ucs].append(name)
print "*/"
print
def dumplist(list):
	"""
	Write the items of 'list' to stdout as a comma-separated sequence,
	wrapping to a new line whenever the running width would pass 78
	columns; a trailing newline is always emitted.
	"""
	out = sys.stdout.write
	width = 0
	for item in list:
		width += len(item) + 1
		if width > 78:
			out("\n")
			width = len(item) + 1
		out(item)
		out(",")
	out("\n")
# Emit the four C arrays: sorted name list, parallel code list, and the
# duplicate tables (codes mapping to several names).
agltab.sort()
namelist = []
codelist = []
for name, ucs in agltab:
	namelist.append("\"%s\"" % name)
	codelist.append("%d" % ucs)
keys = aglmap.keys()
keys.sort()
dupoffsets = []
dupnames = []
for ucs in keys:
	list = aglmap[ucs]
	ofs = len(dupnames)
	# Only codes with more than one name go into the duplicate tables;
	# each run of names is terminated by a 0 sentinel.
	if len(list) > 1:
		dupoffsets.append("%d,%d" % (ucs, ofs))
		for name in list:
			dupnames.append("\"%s\"" % name)
		dupnames.append("0")
print "static const char *agl_name_list[] = {"
dumplist(namelist)
print "};"
print
print "static const unsigned short agl_code_list[] = {"
dumplist(codelist)
print "};"
print
print "static const unsigned short agl_dup_offsets[] = {"
dumplist(dupoffsets)
print "};"
print
print "static const char *agl_dup_names[] = {"
dumplist(dupnames)
print "};"
| Python |
import sys, os, re
# Boilerplate wrapped around the hyperlinked source listing produced below.
# NOTE(review): the body opens two <pre> elements but the footer closes only
# one - presumably tolerated by browsers; confirm if strict HTML is needed.
HEADER="""<head>
<style>
body { background-color:#fffff0; color:black; margin:16pt; }
a { text-decoration:none; color:darkblue; }
a.line { position:relative; padding-top:300px; }
.comment { color:green; font-style:italic; }
.comment a { color:darkgreen; }
</style>
</head>
<body><pre><pre>"""
FOOTER="""</pre></body>"""
# Identifier prefixes considered part of the public API.
prefixes = [ 'fz_', 'pdf_', 'xps_', 'cbz_', 'pdfapp_' ]
def is_public(s):
	"""Return True if identifier 's' carries one of the public API prefixes."""
	# str.startswith accepts a tuple of prefixes: one call instead of a
	# Python-level loop over the prefix list.
	return s.startswith(tuple(prefixes))
def load_tags():
	"""
	Parse the ctags cross-reference file "tags-xref" and return a dict
	mapping each public identifier to an HTML hyperlink pointing at the
	line of its definition.
	"""
	tags = {}
	for entry in open("tags-xref").readlines():
		ident, kind, lineno, path, _ = entry.split(None, 4)
		if not is_public(ident):
			continue
		if kind == 'function':
			tags[ident] = '<a class="function" href="%s#%s">%s</a>' % (os.path.basename(path), lineno, ident)
		if kind == 'typedef' or kind == 'struct':
			tags[ident] = '<a class="typedef" href="%s#%s">%s</a>' % (os.path.basename(path), lineno, ident)
	return tags
# Build the identifier -> hyperlink table once, up front.
tags = load_tags()
def quote(s):
	"""HTML-escape '&', '<' and '>' in 's'.  The ampersand is replaced
	first so the entities introduced for < and > are not double-escaped."""
	# BUG FIX: the replacements had lost their HTML entities (they were
	# identity no-ops), so special characters leaked into the generated
	# HTML unescaped.
	return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
# Main loop: read source from stdin and emit one anchored, hyperlinked
# HTML line per input line.
print HEADER
N = 1
for line in sys.stdin.readlines():
	# expand tabs, html-quote special characters and colorize comments
	line = line.replace('\t', ' ').rstrip()
	line = quote(line)
	line = line.replace("/*", '<span class="comment">/*')
	line = line.replace("*/", '*/</span>')
	# Turn local #include directives into links to the included header.
	line = re.sub('^#include "([a-z-]*\.h)"', '#include "<a href="\\1">\\1</a>"', line)
	# find identifiers and hyperlink to their definitions
	words = re.split("(\W+)", line)
	line = ""
	for word in words:
		if word in tags:
			word = tags[word]
		line += word
	# Each line gets a numbered anchor so tags can point at it.
	#print('<a class="line" name="%d">%4d</a> %s' % (N, N, line))
	print('<a class="line" name="%d"></a>%s' % (N, line))
	N = N + 1
print FOOTER
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files are skipped during comment scanning.
EXCLUDE_TYPES = [
  "application/xml",
  "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}
class License:
  """One distinct license text plus the files it applies to."""

  def __init__(self, license_text):
    self.license_text = license_text
    self.filenames = []

  def add_file(self, filename):
    """Record 'filename' as covered by this license (no duplicates)."""
    if filename in self.filenames:
      return
    self.filenames.append(filename)
# Pattern matching every non-word character; used to normalize license text.
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
  """Return the canonical License object for 'license_text', registering it
  in KNOWN_LICENSES on first sight.  Texts differing only in whitespace,
  punctuation or case collapse onto the same entry.
  TODO(alice): a lot these licenses are almost identical Apache licenses.
  Most of them differ in origin/modifications. Consider combining similar
  licenses.
  """
  key = LICENSE_KEY.sub("", license_text).lower()
  return KNOWN_LICENSES.setdefault(key, License(license_text))
def discover_license(exact_path, filename):
  """Scan one file for license text and register what is found.

  Two discovery modes:
    - files named '*LICENSE' are whole-file licenses applying to the file
      named by the prefix (e.g. 'foo.LICENSE' covers 'foo');
    - otherwise the body is searched for /* ... */ comment blocks
      mentioning both "license" and "copyright".
  Results are accumulated into KNOWN_LICENSES via find_license().
  """
  # when filename ends with LICENSE, assume applies to filename prefixed
  if filename.endswith("LICENSE"):
    with open(exact_path) as file:
      license_text = file.read()
      target_filename = filename[:-len("LICENSE")]
      if target_filename.endswith("."): target_filename = target_filename[:-1]
      find_license(license_text).add_file(target_filename)
    return None
  # try searching for license blocks in raw file
  # BUG FIX: guess_type() returns a (type, encoding) tuple, so the old
  # 'mimetype in EXCLUDE_TYPES' compared a tuple against strings and never
  # matched; test the type element only.
  mimetype, _ = mimetypes.guess_type(filename)
  if mimetype in EXCLUDE_TYPES: return None
  with open(exact_path) as file:
    raw_file = file.read()
    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
      comment = comment.group(1)
      if COMMENT_LICENSE.search(comment) is None: continue
      if COMMENT_COPYRIGHT.search(comment) is None: continue
      find_license(comment).add_file(filename)
# Walk every requested source tree and collect licenses per file.
for source in SOURCE:
  for root, dirs, files in os.walk(source):
    for name in files:
      discover_license(os.path.join(root, name), name)
# Emit the notices page: one section per distinct license text.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
  print "<h3>Notices for files:</h3><ul>"
  filenames = license.filenames
  filenames.sort()
  for filename in filenames:
    print "<li>%s</li>" % (filename)
  print "</ul>"
  print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files are skipped during comment scanning.
EXCLUDE_TYPES = [
  "application/xml",
  "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}
class License:
  """One distinct license text plus the files it applies to."""

  def __init__(self, license_text):
    self.license_text = license_text
    self.filenames = []

  def add_file(self, filename):
    """Record 'filename' as covered by this license (no duplicates)."""
    if filename in self.filenames:
      return
    self.filenames.append(filename)
# Pattern matching every non-word character; used to normalize license text.
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
  """Return the canonical License object for 'license_text', registering it
  in KNOWN_LICENSES on first sight.  Texts differing only in whitespace,
  punctuation or case collapse onto the same entry.
  TODO(alice): a lot these licenses are almost identical Apache licenses.
  Most of them differ in origin/modifications. Consider combining similar
  licenses.
  """
  key = LICENSE_KEY.sub("", license_text).lower()
  return KNOWN_LICENSES.setdefault(key, License(license_text))
def discover_license(exact_path, filename):
  """Scan one file for license text and register what is found.

  Two discovery modes:
    - files named '*LICENSE' are whole-file licenses applying to the file
      named by the prefix (e.g. 'foo.LICENSE' covers 'foo');
    - otherwise the body is searched for /* ... */ comment blocks
      mentioning both "license" and "copyright".
  Results are accumulated into KNOWN_LICENSES via find_license().
  """
  # when filename ends with LICENSE, assume applies to filename prefixed
  if filename.endswith("LICENSE"):
    with open(exact_path) as file:
      license_text = file.read()
      target_filename = filename[:-len("LICENSE")]
      if target_filename.endswith("."): target_filename = target_filename[:-1]
      find_license(license_text).add_file(target_filename)
    return None
  # try searching for license blocks in raw file
  # BUG FIX: guess_type() returns a (type, encoding) tuple, so the old
  # 'mimetype in EXCLUDE_TYPES' compared a tuple against strings and never
  # matched; test the type element only.
  mimetype, _ = mimetypes.guess_type(filename)
  if mimetype in EXCLUDE_TYPES: return None
  with open(exact_path) as file:
    raw_file = file.read()
    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
      comment = comment.group(1)
      if COMMENT_LICENSE.search(comment) is None: continue
      if COMMENT_COPYRIGHT.search(comment) is None: continue
      find_license(comment).add_file(filename)
# Walk every requested source tree and collect licenses per file.
for source in SOURCE:
  for root, dirs, files in os.walk(source):
    for name in files:
      discover_license(os.path.join(root, name), name)
# Emit the notices page: one section per distinct license text.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
  print "<h3>Notices for files:</h3><ul>"
  filenames = license.filenames
  filenames.sort()
  for filename in filenames:
    print "<li>%s</li>" % (filename)
  print "</ul>"
  print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Ricardo Bittencourt (bluepenguin@gmail.com)
# Uploads the direct index to datastore (entity "Series").
# Use with appcfg:
# python appcfg.py upload_data --config_file=direct_index_loader.py \
# --filename=direct.txt --kind=Series app
from google.appengine.ext import db
from google.appengine.tools import bulkloader
import models
def numberListConverter(str):
    """Parse the ':'-separated numbers after the first ';' into longs."""
    id_part = str.split(";")[1]
    return map(long, id_part.split(":"))
class SeriesLoader(bulkloader.Loader):
    """Bulkloader config mapping one line of direct.txt to a Series entity.

    Line format (see dump_direct_index in indextools):
    "<id>:<issues>,<year>,<name>,<publisher>,<hascover>".
    """
    def __init__(self):
        bulkloader.Loader.__init__(
            self, 'Series',
            # One (property, converter) pair per comma-separated column.
            [('issues', lambda x: int(x.split(":")[1])), ('year', lambda x: int(x)),
             ('name', lambda x: x.decode("utf-8")), ('publisher', lambda x: x.decode("utf-8")),
             ('hascover', lambda x: int(x))])
    def generate_key(self, number, values):
        # The entity key is the series id, i.e. the part before ':' in column 0.
        return values[0].split(":")[0]

# Entry point required by appcfg's bulkloader.
loaders = [SeriesLoader]
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Ricardo Bittencourt (bluepenguin@gmail.com)
# Models for entities stored on Datastore.
from google.appengine.ext import db
class SeriesInvertedIndex(db.Model):
    """Inverted-index entry: the entity key is a search word, the value is
    the list of ids of all series whose name contains that word."""
    seriesNumberList = db.ListProperty(long)
class Series(db.Model):
    """One comic series; keyed by the GCD series id (see the loaders)."""
    name = db.StringProperty()       # series title
    year = db.IntegerProperty()      # year the series began
    issues = db.IntegerProperty()    # number of issues
    publisher = db.StringProperty()  # publisher name (denormalized)
    hascover = db.IntegerProperty()  # nonzero if a cover image exists
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Ricardo Bittencourt (bluepenguin@gmail.com)
# Dump the ids of all series whose cover has not been downloaded yet.
import gflags
import indextools
import os
import sys
FLAGS = gflags.FLAGS
gflags.DEFINE_string("output", "missing.txt", "File name where the ids will be stored")

# Parse flags (this also picks up the mysql_* flags defined in indextools).
FLAGS(sys.argv)
series = indextools.load_series(FLAGS.mysql_user, FLAGS.mysql_password)
# x[5] is has_gallery: keep ids of series that should have a cover but whose
# <id>.jpg has not been downloaded into the current directory yet.
missing = [x[0] for x in series if x[5] and not os.path.exists("%d.jpg" % x[0])]
f = open(FLAGS.output, "w")
for miss in missing:
    f.write("%d\n" % miss)
f.close()
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Ricardo Bittencourt (bluepenguin@gmail.com)
# Tools to dump the data from the GCD MySQL database and
# build an index to be used in the datastore.
import codecs
import gflags
import MySQLdb
import sys
import unicodedata
FLAGS = gflags.FLAGS
# Command-line flags shared by the index-building scripts in this directory.
gflags.DEFINE_string("index_type", "direct", "Type of index to be built, either 'direct' or 'inverse'")
gflags.DEFINE_string("mysql_user", None, "User to connect on the MySQL database")
gflags.DEFINE_string("mysql_password", None, "Password of the user to connect on the MySQL database")
gflags.DEFINE_string("index_output", "direct.txt", "File name where the index will be stored")
def load_series(db_user, db_password):
    """Return all gcd_series rows as tuples
    (id, name, issue_count, year_began, publisher_id, has_gallery)."""
    database = load_database(db_user, db_password)
    cursor = database.cursor()
    cursor.execute("SELECT id,name,issue_count,year_began,publisher_id,has_gallery FROM gcd_series")
    return cursor.fetchall()
def load_database(db_user, db_password):
    """Open a connection to the local 'gibi' MySQL database."""
    return MySQLdb.connect(db="gibi", user=db_user, passwd=db_password)
def load_publisher(db_user, db_password):
    """Return all (id, name) rows from gcd_publisher."""
    database = load_database(db_user, db_password)
    cursor = database.cursor()
    cursor.execute("SELECT id,name FROM gcd_publisher")
    return cursor.fetchall()
def word_split(original):
    """Split a latin1-encoded byte string into lowercased alphanumeric words.

    Any non-alphanumeric character acts as a separator.
    """
    text = unicode(original, "latin1")
    words = []
    buffer = u""
    for character in text:
        if character.isalnum():
            buffer += character
        elif buffer:
            words.append(buffer.lower())
            buffer = u""
    # Flush the trailing word, if the string did not end with a separator.
    if buffer:
        words.append(buffer.lower())
    return words
def remove_accents(words):
    """Return accent-stripped variants of the words that contain accents.

    Words that come out unchanged are NOT echoed back, so the result can be
    appended to the input list without creating duplicates.
    """
    output = []
    for word in words:
        decomposed = unicodedata.normalize("NFKD", word)
        stripped = u"".join(
            character for character in decomposed
            if not unicodedata.combining(character))
        if stripped != word:
            output.append(stripped)
    return output
def build_inverted_index(data):
    """Map every word of each series name (plus its accent-stripped variant)
    to the list of series ids whose name contains it."""
    inverted = {}
    for series in data:
        tokens = word_split(series[1])
        tokens += remove_accents(tokens)
        for token in tokens:
            inverted.setdefault(token, []).append(series[0])
    return inverted
def build_direct_index(data):
    """Index the series rows by their id (column 0)."""
    return dict((series[0], series) for series in data)
def search(series, direct, inverse):
    """Return the row of every series whose name contains all the words of
    the query string, using the direct and inverted indexes."""
    words = word_split(series)
    # Intersect the id sets of all query words.
    choices = reduce(set.intersection, (set(inverse[word]) for word in words))
    return [direct[choice] for choice in choices]
def dump_inverted_index(user, password, filename):
data = load_series(user, password)
inv = build_inverted_index(data)
f = codecs.open(filename, "w", "utf-8")
for key,value in inv.iteritems():
if len(value) < 5000:
f.write(u"%s;%s\n" % (key, ":".join(str(i) for i in value)))
else:
print "ignored: " + key
f.close()
def decode(name):
    """Wrap `name` in double quotes (turning embedded double quotes into
    single quotes) and decode the latin1 bytes to unicode."""
    quoted = '"%s"' % name.replace('"', "'")
    return unicode(quoted, "latin1")
def dump_direct_index(user, password, filename):
    """Write the direct index to `filename`, one utf-8 line per series:
    id:issues,year,"name","publisher",has_gallery."""
    data = load_series(user, password)
    publisher_data = dict(load_publisher(user, password))
    f = codecs.open(filename, "w", "utf-8")
    try:
        for id, name, issues, year, publisher, has_gallery in data:
            f.write(u"%s:%s,%s,%s,%s,%s\n" % (
                id, issues, year, decode(name),
                decode(publisher_data[publisher]), has_gallery))
    finally:
        # Fix: close the file even when a row fails to decode (original
        # leaked the handle on any exception mid-loop).
        f.close()
if __name__ == "__main__":
FLAGS(sys.argv)
if FLAGS.index_type == "direct":
dump_direct_index(FLAGS.mysql_user, FLAGS.mysql_password, FLAGS.index_output)
elif FLAGS.index_type == "inverse":
dump_inverted_index(FLAGS.mysql_user, FLAGS.mysql_password, FLAGS.index_output) | Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Ricardo Bittencourt (bluepenguin@gmail.com)
# Crawls the series covers from the gcd site.
import gflags
import os
import random
import re
import sys
import threading
import urllib2
FLAGS = gflags.FLAGS
# Input file of series ids (one per line) and crawl parallelism.
gflags.DEFINE_string("covers", "missing.txt", "File name from where the covers ids will be read")
gflags.DEFINE_integer("threads", 20, "Number of crawling threads to use")
class Crawler(threading.Thread):
    """Worker thread that downloads the first cover image for each series id
    in its slice of the input file, saving it as <id>.jpg."""
    def __init__(self, id, files):
        threading.Thread.__init__(self)
        self.files = files  # list of input lines, each holding one series id
        self.id = id        # worker number, used only in progress output
    def run(self):
        for line in self.files:
            id = line.strip()
            #print "thread %d: %s" % (self.id, id)
            # Skip covers already downloaded on a previous run.
            if os.path.exists("%s.jpg" % id):
                continue
            info_url = "http://www.comics.org/series/%s/" % id
            page = urllib2.urlopen(info_url).read()
            # Grab the first cover link (covers_by_id) on the series page.
            match = re.search('"(http://images\.comics\.org//img.gcd/covers_by_id[^"]+)"', page)
            if match is not None:
                print "thread %d: %s" % (self.id, id)
                image_url = match.group(1)
                image = urllib2.urlopen(image_url).read()
                image_file = open("%s.jpg" % id, "wb")
                image_file.write(image)
                image_file.close()
# Split the id list into interleaved slices and crawl them in parallel.
FLAGS(sys.argv)
number_threads = FLAGS.threads
input = open(FLAGS.covers, "r").readlines()
# Shuffled, presumably so restarts spread requests over the id space —
# TODO(review): confirm intent.
random.shuffle(input)
slices = [input[i::number_threads] for i in range(number_threads)]
threads = [Crawler(i, slices[i]) for i in range(number_threads)]
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Ricardo Bittencourt (bluepenguin@gmail.com)
# Uploads the inverted index to datastore (entity "SeriesInvertedIndex").
# Use with appcfg:
# python appcfg.py upload_data --config_file=inverted_index_loader.py \
# --filename=inverse.txt --kind=SeriesInvertedIndex app
from google.appengine.ext import db
from google.appengine.tools import bulkloader
import models
def numberListConverter(str):
    """Parse the ':'-separated numbers after the first ';' into longs."""
    id_part = str.split(";")[1]
    return map(long, id_part.split(":"))
class SeriesInvertedIndexLoader(bulkloader.Loader):
    """Bulkloader config mapping one line of inverse.txt
    ("word;id1:id2:...") to a SeriesInvertedIndex entity."""
    def __init__(self):
        bulkloader.Loader.__init__(
            self, 'SeriesInvertedIndex',
            [('seriesNumberList', numberListConverter)])
    def generate_key(self, number, values):
        # The entity key is the search word before the ';'.
        return values[0].split(";")[0].decode("utf-8")

# Entry point required by appcfg's bulkloader.
loaders = [SeriesInvertedIndexLoader]
'''
Created on 21-03-2011
@author: maciek
'''
def formatString(format, **kwargs):
    """Format `format` with str.format semantics, treating every brace that
    is not one of the given keyword placeholders as a literal character.

    Returns '' when the template is empty or None.
    """
    if not format:
        return ''
    template = format
    # Shield the real placeholders, escape every remaining brace, then
    # restore the placeholders before formatting.
    for name in kwargs:
        template = template.replace("{" + name + "}", "##" + name + "##")
    template = template.replace("{", "{{").replace("}", "}}")
    for name in kwargs:
        template = template.replace("##" + name + "##", "{" + name + "}")
    rendered = template.format(**kwargs)
    # Undo any escaping that survived formatting.
    rendered = rendered.replace("{{", "{")
    return rendered.replace("}}", "}")
'''
Created on 21-03-2011
@author: maciek
'''
from IndexGenerator import IndexGenerator
from optparse import OptionParser
import os
import tempfile
import shutil
import logging
logging.basicConfig(level = logging.DEBUG)

# Command-line interface; every option is effectively mandatory and is
# validated by hand below (optparse has no "required option" support).
parser = OptionParser()
parser.add_option('-n', '--app-name', action='store', dest='appName', help='aplication name')
parser.add_option('-u', '--release-urls', action='store', dest='releaseUrls', help='URLs of download files - as coma separated list of entrires')
parser.add_option('-d', '--destination-directory', action='store', dest='otaAppDir', help='Directory where OTA files are created')
parser.add_option('-v', '--version', action='store', dest='version', help='Version of the application')
parser.add_option('-r', '--releases', action='store', dest='releases', help='Release names of the application')
parser.add_option('-R', '--release-notes', action='store', dest='releaseNotes', help='Release notes of the application (in txt2tags format)')
parser.add_option('-D', '--description', action='store', dest='description', help='Description of the application (in txt2tags format)')
(options, args) = parser.parse_args()
if options.appName == None:
    parser.error("Please specify the appName.")
elif options.releaseUrls == None:
    parser.error("Please specify releaseUrls")
elif options.otaAppDir == None:
    parser.error("Please specify destination directory")
elif options.version == None:
    parser.error("Please specify version")
elif options.releases == None:
    parser.error("Please specify releases")
elif options.releaseNotes == None:
    parser.error("Please specify releaseNotes")
elif options.description == None:
    parser.error("Please specify description")

# Unpack into module-level globals read by the helper functions below.
appName = options.appName
releaseUrls = options.releaseUrls
otaAppDir = options.otaAppDir
version = options.version
releases = options.releases
releaseNotes = options.releaseNotes
description = options.description
def findIconFilename():
    """Return the launcher icon path, preferring the highest density that
    exists; falls back to res/drawable/icon.png even if it is missing."""
    candidates = ["res/drawable-hdpi/icon.png",
                  "res/drawable-mdpi/icon.png",
                  "res/drawable-ldpi/icon.png"]
    iconPath = "res/drawable/icon.png"
    for candidate in candidates:
        if os.path.exists(candidate):
            iconPath = candidate
            break
    logging.debug("IconPath: "+iconPath)
    return iconPath
def createOTApackage():
    '''
    Renders index.html into a temporary file and returns that file, rewound
    to the start so callers can copy its content.
    Reads the module-level option globals (appName, releaseUrls, ...).
    '''
    releaseNotesContent = open(releaseNotes).read()
    descriptionContent = open(description).read()
    indexGenerator = IndexGenerator(appName, releaseUrls, releaseNotesContent, descriptionContent, version, releases)
    index = indexGenerator.get();
    tempIndexFile = tempfile.TemporaryFile()
    tempIndexFile.write(index)
    tempIndexFile.flush()
    # Rewind so the caller reads the generated content from the beginning.
    tempIndexFile.seek(0)
    return tempIndexFile
# Generate the OTA page, create the destination directory if needed, and
# copy the index plus the app icon into it.
tempIndexFile = createOTApackage()
if not os.path.isdir(otaAppDir):
    logging.debug("creating dir: "+otaAppDir)
    os.mkdir(otaAppDir)
else:
    logging.warning("dir: "+otaAppDir+" exists")
indexFile = open(os.path.join(otaAppDir,"index.html"),'w')
shutil.copyfileobj(tempIndexFile, indexFile)
srcIconFileName = findIconFilename()
disIconFileName = os.path.join(otaAppDir,"Icon.png")
shutil.copy(srcIconFileName,disIconFileName)
'''
Created on 21-03-2011
@author: maciek
'''
from formater import formatString
import os
class IndexGenerator(object):
    '''
    Generates index.html for iOS app OTA distribution.

    The page is rendered from templates/index.tmpl via formatString, with
    one download list item per release name / URL pair.
    '''
    basePath = os.path.dirname(__file__)
    templateFile = os.path.join(basePath, "templates/index.tmpl")
    releaseUrls = ""
    appName = ""
    changeLog = ""
    description = ""
    version = ""
    release = ""

    def __init__(self, appName, releaseUrls, changeLog, description, version, releases):
        '''
        Remember the application metadata used when rendering.
        '''
        self.appName = appName
        self.releaseUrls = releaseUrls
        self.changeLog = changeLog
        self.description = description
        self.version = version
        self.releases = releases

    def get(self):
        '''
        Render and return the index.html source from the template file.
        '''
        urls = self.releaseUrls.split(",")
        names = self.releases.split(",")
        fragments = []
        # One <li> download link per release, paired with its URL by index.
        for position, name in enumerate(names):
            fragments.append(" <li>\n")
            fragments.append(" <h3><a href=\"javascript:load('" + urls[position] + "')\">" + name + "</a></h3>\n")
            fragments.append(" </li>\n")
        template = open(self.templateFile).read()
        return formatString(template,
                            downloads="".join(fragments),
                            changeLog=self.changeLog,
                            appName=self.appName,
                            description=self.description,
                            version=self.version)
#!/usr/bin/python2.5
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interactive Python console connected to an app's datastore.
Instead of running this script directly, use the 'console' shell script,
which sets up the PYTHONPATH and other necessary environment variables."""
import code
import getpass
import logging
import optparse
import os
import sys
import urllib
import yaml
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
# Make some useful environment variables available.
# These are exported by the 'console' wrapper script (see module docstring);
# a missing variable raises KeyError immediately, which is intended.
APP_DIR = os.environ['APP_DIR']
APPENGINE_DIR = os.environ['APPENGINE_DIR']
PROJECT_DIR = os.environ['PROJECT_DIR']
TOOLS_DIR = os.environ['TOOLS_DIR']
# Set up more useful representations, handy for interactive data manipulation
# and debugging. Unfortunately, the App Engine runtime relies on the specific
# output of repr(), so this isn't safe in production, only debugging.
def key_repr(key):
    """Readable repr for a datastore Key: '<Key: Kind id/Kind name/...>',
    listed from root ancestor down to the key itself."""
    levels = []
    while key:
        label = '%s %s' % (key.kind(), key.id() or repr(key.name()))
        levels.append(label)
        key = key.parent()
    # We collected leaf-first; present root-first.
    levels.reverse()
    return '<Key: %s>' % '/'.join(levels)
# Monkey-patch db.Key so interactive sessions print readable keys.
db.Key.__repr__ = key_repr
def model_repr(model):
    """Readable repr for a datastore Model: its kind plus id/name, or
    '<Kind: unsaved>' for entities without a key yet."""
    if not model.is_saved():
        return '<%s: unsaved>' % model.kind()
    key = model.key()
    return '<%s: %s>' % (key.kind(), key.id() or repr(key.name()))
# Monkey-patch db.Model so interactive sessions print readable entities.
db.Model.__repr__ = model_repr
def get_app_id():
    """Gets the app_id from the app.yaml configuration file."""
    # NOTE(review): the file handle is never closed; acceptable for a
    # one-shot command-line tool.
    return yaml.safe_load(open(APP_DIR + '/app.yaml'))['application']
def connect(server, app_id=None, username=None, password=None):
    """Sets up a connection to an app that has the remote_api handler.

    Prompts interactively for any of app_id/username/password not supplied.
    """
    if not app_id:
        app_id = get_app_id()
    print 'Application ID: %s' % app_id
    print 'Server: %s' % server
    if not username:
        username = raw_input('Username: ')
    else:
        print 'Username: %s' % username
    # Sets up users.get_current_user() inside of the console
    os.environ['USER_EMAIL'] = username
    if not password:
        password = getpass.getpass('Password: ')
    remote_api_stub.ConfigureRemoteDatastore(
        app_id, '/remote_api', lambda: (username, password), server)
    db.Query().count() # force authentication to happen now
if __name__ == '__main__':
    default_address = 'localhost'
    default_port = 8080
    default_app_id = get_app_id()
    parser = optparse.OptionParser(usage='''%%prog [options] [server]
Starts an interactive console connected to an App Engine datastore.
The [server] argument is a shorthand for setting the hostname, port
number, and application ID. For example:
%%prog xyz.appspot.com # uses port 80, app ID 'xyz'
%%prog localhost:6789 # uses port 6789, app ID %r''' % default_app_id)
    parser.add_option('-a', '--address',
                      help='appserver hostname (default: localhost)')
    parser.add_option('-p', '--port', type='int',
                      help='appserver port number (default: 8080)')
    parser.add_option('-A', '--application',
                      help='application ID (default: %s)' % default_app_id)
    parser.add_option('-u', '--username',
                      help='username (in the form of an e-mail address)')
    parser.add_option('-c', '--command',
                      help='Python commands to execute')
    options, args = parser.parse_args()
    # Handle shorthand for address, port number, and app ID.
    if args:
        default_address, default_port = urllib.splitport(args[0])
        default_port = int(default_port or 80)
        if default_address != 'localhost':
            default_app_id = default_address.split('.')[0]
    # Apply defaults. (We don't use optparse defaults because we want to let
    # explicit settings override our defaults.)
    address = options.address or default_address
    port = options.port or default_port
    app_id = options.application or default_app_id
    username = options.username
    password = None
    # Use a dummy password when connecting to a development app server.
    if address == 'localhost':
        password = 'foo'
    # NOTE(review): logging.basicConfig has no 'file' argument (it is
    # 'stream='); this call likely only sets the level — confirm.
    logging.basicConfig(file=sys.stderr, level=logging.INFO)
    connect('%s:%d' % (address, port), app_id, username, password)
    if options.command:
        # -c behaves like python -c: run the statements and exit.
        exec options.command
    else:
        # Otherwise drop into an interactive session sharing this scope.
        code.interact('', None, locals())
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
# Copyright 2010 Joe LaPenna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Latitude OAuth client."""
import oauth
import oauth_appengine
class LatitudeOAuthClient(oauth_appengine.OAuthClient):
    """Latitude-specific OAuth client.

    Per: http://code.google.com/apis/gdata/articles/oauth.html
    """
    # Google's generic OAuth token endpoints plus the Latitude-specific
    # authorization page and API scope.
    REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken'
    ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken'
    AUTHORIZATION_URL = \
        'https://www.google.com/latitude/apps/OAuthAuthorizeToken'
    SCOPE = 'https://www.googleapis.com/auth/latitude'

    def __init__(self, oauth_consumer=None, oauth_token=None):
        super(LatitudeOAuthClient, self).__init__(
            oauth_consumer=oauth_consumer,
            oauth_token=oauth_token,
            request_token_url=LatitudeOAuthClient.REQUEST_TOKEN_URL,
            access_token_url=LatitudeOAuthClient.ACCESS_TOKEN_URL,
            authorization_url=LatitudeOAuthClient.AUTHORIZATION_URL)
        # Requests are signed with HMAC-SHA1.
        self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
class Latitude(object):
    """API access to Latitude."""

    REST_URL = 'https://www.googleapis.com/latitude/v1/%s'

    def __init__(self, oauth_client):
        self.client = oauth_client

    def get_current_location(self):
        """Fetch the user's current location ('best' granularity) via a
        signed GET to the currentLocation resource."""
        request = oauth.OAuthRequest.from_consumer_and_token(
            self.client.get_consumer(),
            token=self.client.get_token(),
            http_method='GET',
            http_url=Latitude.REST_URL % ('currentLocation',),
            parameters={'granularity': 'best'})
        request.sign_request(
            self.client.signature_method,
            self.client.get_consumer(),
            self.client.get_token())
        return self.client.access_resource(request)
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Steps of the OAuth dance implemented for the webapp framework."""
__author__ = 'Ka-Ping Yee <kpy@google.com>'
import oauth
import oauth_appengine
def redirect_to_authorization_page(
    handler, oauth_client, callback_url, parameters):
    """Sends the user to an OAuth authorization page."""
    # Get a request token.
    helper = oauth_appengine.OAuthDanceHelper(oauth_client)
    request_token = helper.GetRequestToken(callback_url, parameters)
    # Save the request token in cookies so we can pick it up later,
    # when the provider redirects the user back to callback_url.
    handler.response.headers.add_header(
        'Set-Cookie', 'request_key=' + request_token.key)
    handler.response.headers.add_header(
        'Set-Cookie', 'request_secret=' + request_token.secret)
    # Send the user to the authorization page.
    handler.redirect(
        helper.GetAuthorizationRedirectUrl(request_token, parameters))
def handle_authorization_finished(handler, oauth_client):
    """Handles a callback from the OAuth authorization page and returns
    a freshly minted access token."""
    # Pick up the request token from the cookies.
    request_token = oauth.OAuthToken(
        handler.request.cookies['request_key'],
        handler.request.cookies['request_secret'])
    # Upgrade our request token to an access token, using the verifier.
    helper = oauth_appengine.OAuthDanceHelper(oauth_client)
    access_token = helper.GetAccessToken(
        request_token, handler.request.get('oauth_verifier', None))
    # Clear the cookies that contained the request token.
    handler.response.headers.add_header('Set-Cookie', 'request_key=')
    handler.response.headers.add_header('Set-Cookie', 'request_secret=')
    return access_token
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple app that performs the OAuth dance and makes a Latitude request."""
__author__ = 'Ka-Ping Yee <kpy@google.com>'
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
import latitude
import oauth
import oauth_webapp
OAUTH_CALLBACK_PATH = '/oauth_callback'
# To set up this application as an OAuth consumer:
# 1. Go to https://www.google.com/accounts/ManageDomains
# 2. Follow the instructions to register and verify your domain
# 3. The administration page for your domain should now show an "OAuth
# Consumer Key" and "OAuth Consumer Secret". Put these values into
# the app's datastore by calling Config.set('oauth_consumer_key', ...)
# and Config.set('oauth_consumer_secret', ...).
class Config(db.Model):
    """Tiny key/value store for app settings (key_name -> string value)."""
    value = db.StringProperty()

    @staticmethod
    def get(name):
        # Returns None when the config entry does not exist.
        config = Config.get_by_key_name(name)
        return config and config.value

    @staticmethod
    def set(name, value):
        Config(key_name=name, value=value).put()
# Consumer credentials come from the datastore; see the setup instructions
# in the comment block above.
oauth_consumer = oauth.OAuthConsumer(
    Config.get('oauth_consumer_key'), Config.get('oauth_consumer_secret'))
class Main(webapp.RequestHandler):
    """This main page immediately redirects to the OAuth authorization page."""
    def get(self):
        # Request the Latitude scope, best granularity, for all locations.
        parameters = {
            'scope': latitude.LatitudeOAuthClient.SCOPE,
            'domain': Config.get('oauth_consumer_key'),
            'granularity': 'best',
            'location': 'all'
        }
        oauth_webapp.redirect_to_authorization_page(
            self, latitude.LatitudeOAuthClient(oauth_consumer),
            self.request.host_url + OAUTH_CALLBACK_PATH, parameters)
class LatitudeOAuthCallbackHandler(webapp.RequestHandler):
    """After the user gives permission, the user is redirected back here."""
    def get(self):
        # Trade the authorized request token for an access token.
        access_token = oauth_webapp.handle_authorization_finished(
            self, latitude.LatitudeOAuthClient(oauth_consumer))
        # Request the user's location
        client = latitude.LatitudeOAuthClient(oauth_consumer, access_token)
        result = latitude.Latitude(client).get_current_location()
        self.response.out.write('Your location is: ' + result.content)
if __name__ == '__main__':
    # Route / to the redirect page and the OAuth callback to its handler.
    run_wsgi_app(webapp.WSGIApplication([
        ('/', Main),
        (OAUTH_CALLBACK_PATH, LatitudeOAuthCallbackHandler)
    ]))
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
# Protocol defaults used when a request does not specify them explicitly.
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
    """Generic exception class."""
    def __init__(self, message='OAuth error occured.'):
        # Fix: forward the message to RuntimeError so str(err) and err.args
        # carry it (originally it was stored only on .message, leaving
        # str(err) empty). The .message attribute is kept for callers.
        RuntimeError.__init__(self, message)
        self.message = message
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)"""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def escape(s):
    """Escape a URL including any /."""
    # Pass safe='~' so '/' (quote's default safe char) is percent-encoded
    # too, while '~' is left alone.
    return urllib.quote(s, safe='~')
def _utf8_str(s):
    """Convert unicode to utf-8."""
    if isinstance(s, unicode):
        return s.encode("utf-8")
    else:
        # Non-unicode values (ints, byte strings, ...) are stringified as-is.
        return str(s)
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate pseudorandom number."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate pseudorandom number."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
class OAuthConsumer(object):
    """Consumer of OAuth authentication.

    OAuthConsumer is a data type that represents the identity of the Consumer
    via its shared secret with the Service Provider.
    """
    # Class-level defaults; __init__ always overwrites both.
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
class OAuthToken(object):
    """A data type that represents an End User via either an access
    or request token.

    key -- the token
    secret -- the token secret
    """
    # Class-level defaults; key/secret are set in __init__, the rest via
    # the setters below.
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def __repr__(self):
        return 'OAuthToken(%r, %r)' % (self.key, self.secret)

    def set_callback(self, callback):
        # Records the callback URL and marks it confirmed
        # (the oauth_callback_confirmed field serialized by to_string).
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # With no explicit verifier, mint a random one.
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()

    def get_callback_url(self):
        # Returns the callback URL with oauth_verifier appended to its query
        # string when both callback and verifier are set.
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                                        query, fragment))
        return self.callback

    def to_string(self):
        # Form-encode key/secret plus the confirmation flag, if present.
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    def from_string(s):
        """ Returns a token from something like:
            oauth_token_secret=xxx&oauth_token=xxx
        """
        params = cgi.parse_qs(s, keep_blank_values=False)
        key = params['oauth_token'][0]
        secret = params['oauth_token_secret'][0]
        token = OAuthToken(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token
    from_string = staticmethod(from_string)

    def __str__(self):
        return self.to_string()
class OAuthRequest(object):
    """OAuthRequest represents the request and can be serialized.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        - oauth_verifier
        ... any additional parameters, as defined by the Service Provider.
    """
    parameters = None # OAuth parameters.
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION
    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Create a request; `parameters` holds oauth_* and extra params."""
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}
    def set_parameter(self, parameter, value):
        # Set or overwrite a single (OAuth or custom) parameter.
        self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
    def _get_timestamp_nonce(self):
        # Convenience accessor for the (timestamp, nonce) parameter pair.
        return self.get_parameter('oauth_timestamp'), self.get_parameter(
            'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        auth_header = 'OAuth realm="%s"' % realm
        # Add the oauth parameters (only keys with the oauth_ prefix).
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        # Every parameter (oauth_* and custom) is escaped and joined.
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
            for k, v in self.parameters.iteritems()])
    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store.

    Fixes over the original: removed unreachable statements after the
    raise in _check_signature, and narrowed the bare except clauses to
    the exceptions the guarded calls actually raise.
    """
    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, data_store):
        self.data_store = data_store

    def get_data_store(self):
        return self.data_store

    def add_signature_method(self, signature_method):
        """Register a signature method under its wire name; returns the registry."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token

    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        try:
            verifier = self._get_verifier(oauth_request)
        except OAuthError:
            verifier = None # 1.0, no verifier.
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(
            consumer, token, verifier)
        return new_token

    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters.

        Returns (consumer, token, non-oauth parameters).
        """
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)

    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except OAuthError:
            # Parameter absent: assume the server's default version.
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except OAuthError:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_consumer(self, oauth_request):
        """Look up the consumer; raises OAuthError when unknown."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _get_verifier(self, oauth_request):
        return oauth_request.get_parameter('oauth_verifier')

    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raises OAuthError on failure."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except OAuthError:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(
            oauth_request, consumer, token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            # (The original recomputed the signature after this raise;
            # that statement was unreachable and has been removed.)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = abs(now - timestamp)
        if lapsed > self.timestamp_threshold:
            raise OAuthError(
                'Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """Base worker for executing OAuth requests.

    Holds the consumer/token pair; concrete transports subclass this and
    implement the three fetch/access methods.
    """
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        """Remember the consumer and token used for subsequent requests."""
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        """Return the stored OAuth consumer."""
        return self.consumer

    def get_token(self):
        """Return the stored OAuth token."""
        return self.token

    def fetch_request_token(self, oauth_request):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """-> OAuthToken."""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """-> Some protected resource."""
        raise NotImplementedError
class OAuthDataStore(object):
    """A database abstraction used to lookup consumers and tokens.

    Pure interface: every method must be overridden by a concrete store.
    """

    def lookup_consumer(self, key):
        """-> OAuthConsumer."""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """-> OAuthToken."""
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """-> OAuthToken."""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """-> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """A strategy class that implements a signature method.

    Subclasses provide the wire name, the base-string builder and the
    signature builder; check_signature compares a freshly built signature
    with the one supplied by the caller.
    """

    def get_name(self):
        """-> str."""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """-> str key, str raw."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """-> str."""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """True when `signature` equals a freshly built signature."""
        expected = self.build_signature(oauth_request, consumer, token)
        return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signatures: key is 'consumer_secret&token_secret',
    base string is method & url & normalized parameters (all escaped)."""

    def get_name(self):
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (signing key, signature base string) for the request."""
        base_parts = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )
        signing_key = '%s&' % escape(consumer.secret)
        if token:
            signing_key = signing_key + escape(token.secret)
        return signing_key, '&'.join(base_parts)

    def build_signature(self, oauth_request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.build_signature_base_string(
            oauth_request, consumer, token)
        # Build the HMAC object; fall back to the deprecated sha module
        # when hashlib (Python 2.5+) is unavailable.
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # Deprecated
            hashed = hmac.new(key, raw, sha)
        # Digest, base64-encoded with the trailing newline stripped.
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signatures: the signature IS the escaped secrets."""

    def get_name(self):
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Concatenates the consumer key and secret."""
        secrets = '%s&' % escape(consumer.secret)
        if token:
            secrets += escape(token.secret)
        # Key and raw string are identical for PLAINTEXT.
        return secrets, secrets

    def build_signature(self, oauth_request, consumer, token):
        """The signature is simply the key portion of the base string."""
        key, _raw = self.build_signature_base_string(
            oauth_request, consumer, token)
        return key
| Python |
#!/usr/bin/env python
# Copyright 2009 Joe LaPenna
# Copyright 2009 Google
"""
An appengine OAuthClient based on the oauth-python reference implementation.
"""
import oauth
from google.appengine.api import urlfetch
from google.appengine.ext import db
class OAuthClient(oauth.OAuthClient):
    """A worker to attempt to execute a request (on appengine).

    Fix: access_resource accepted a `deadline` argument but never passed
    it to urlfetch.fetch; it is now forwarded (urlfetch's own default is
    None, so omitting it keeps the previous behavior).
    """

    def __init__(self, oauth_consumer, oauth_token, request_token_url='',
                 access_token_url='', authorization_url=''):
        super(OAuthClient, self).__init__(oauth_consumer, oauth_token)
        self.request_token_url = request_token_url
        self.access_token_url = access_token_url
        self.authorization_url = authorization_url

    def fetch_request_token(self, oauth_request):
        """-> OAuthToken."""
        # Using headers or payload varies by service...
        response = urlfetch.fetch(
            url=self.request_token_url,
            method=oauth_request.http_method,
            #headers=oauth_request.to_header(),
            payload=oauth_request.to_postdata())
        return oauth.OAuthToken.from_string(response.content)

    def fetch_access_token(self, oauth_request):
        """-> OAuthToken."""
        response = urlfetch.fetch(
            url=self.access_token_url,
            method=oauth_request.http_method,
            headers=oauth_request.to_header())
        return oauth.OAuthToken.from_string(response.content)

    def access_resource(self, oauth_request, deadline=None):
        """-> Some protected resource.

        deadline: optional urlfetch deadline in seconds; previously
        accepted but silently ignored.
        """
        if oauth_request.http_method == 'GET':
            # GET: the oauth parameters travel in the query string.
            url = oauth_request.to_url()
            return urlfetch.fetch(
                url=url,
                method=oauth_request.http_method,
                deadline=deadline)
        else:
            # Non-GET: the oauth parameters travel in the body.
            payload = oauth_request.to_postdata()
            return urlfetch.fetch(
                url=oauth_request.get_normalized_http_url(),
                method=oauth_request.http_method,
                payload=payload,
                deadline=deadline)
class OAuthDanceHelper(object):
    """Drives the three-step OAuth dance (request token, user
    authorization redirect, access token) through an OAuthClient."""

    def __init__(self, oauth_client):
        self.oauth_client = oauth_client

    def GetRequestToken(self, callback, parameters=None):
        """Gets a request token from an OAuth provider."""
        consumer = self.oauth_client.get_consumer()
        token_request = oauth.OAuthRequest.from_consumer_and_token(
            consumer,
            token=None,
            callback=callback,
            http_method='POST',
            http_url=self.oauth_client.request_token_url,
            parameters=parameters)
        # Sign so the provider will issue a token we can redirect with.
        token_request.sign_request(
            self.oauth_client.signature_method,
            consumer,
            None)
        return self.oauth_client.fetch_request_token(token_request)

    def GetAuthorizationRedirectUrl(self, request_token, parameters=None):
        """Gets the redirection URL for the OAuth authorization page."""
        auth_request = oauth.OAuthRequest.from_token_and_callback(
            request_token,
            http_method='GET',
            http_url=self.oauth_client.authorization_url,
            parameters=parameters)
        return auth_request.to_url()

    def GetAccessToken(self, request_token, verifier):
        """Upgrades a request token to an access token."""
        consumer = self.oauth_client.get_consumer()
        upgrade_request = oauth.OAuthRequest.from_consumer_and_token(
            consumer,
            token=request_token,
            verifier=verifier,
            http_url=self.oauth_client.access_token_url)
        upgrade_request.sign_request(
            self.oauth_client.signature_method,
            consumer,
            request_token)
        return self.oauth_client.fetch_access_token(upgrade_request)
| Python |
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
# The two build flavors this script can switch the source tree between.
configurations = {'lite', 'pro'}
# Java package directory for each flavor; tuples are index-aligned across flavors.
package_dirs = {
    'lite': ('src/cx/hell/android/pdfview',),
    'pro': ('src/cx/hell/android/pdfviewpro',)
}
# Index-aligned replacement strings: file_replaces[conf][i] in one flavor
# corresponds to file_replaces[other_conf][i] in the other.
file_replaces = {
    'lite': (
        'cx.hell.android.pdfview.',
        '"cx.hell.android.pdfview"',
        'package cx.hell.android.pdfview;',
        'android:icon="@drawable/pdfviewer"',
    ),
    'pro': (
        'cx.hell.android.pdfviewpro.',
        '"cx.hell.android.pdfviewpro"',
        'package cx.hell.android.pdfviewpro;',
        'android:icon="@drawable/apvpro_icon"',
    ),
}
def make_comment(file_type, line):
    """Add comment to line and return modified line, but try not to add comments to already commented out lines."""
    if file_type in ('java', 'c'):
        if line.startswith('//'):
            return line
        return '// ' + line
    if file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!--'):
            return line
        return '<!-- ' + stripped + ' -->\n'
    raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
    """Remove comment from line, but only if line is commented, otherwise return unchanged line."""
    if file_type in ('java', 'c'):
        return line[3:] if line.startswith('// ') else line
    if file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!-- ') and stripped.endswith(' -->'):
            return stripped[5:-4] + '\n'
        return line
    raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
    """Toggle comments on lines guarded by '#ifdef <conf>' ... '#endif' markers.

    Lines inside a single region guarded by the active configuration are
    uncommented; lines guarded by a different configuration (or by nested
    regions) are commented out.  `filename` is used for log messages only.
    Returns a new list with exactly one output line per input line.
    """
    new_lines = []
    # Marker lines are themselves comments: '// #ifdef pro' or '<!-- #ifdef pro'.
    re_cmd_starts = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
    re_cmd_ends = re.compile(r'(?:(//|<!--))\s+#endif')
    required_defs = []  # stack of currently-open #ifdef configuration names
    for i, line in enumerate(lines):
        m = re_cmd_starts.search(line)
        if m:
            required_def = m.group('def')
            logging.debug("line %s:%d %s matches as start of %s" % (filename, i+1, line.strip(), required_def))
            required_defs.append(required_def)
            new_lines.append(line)
            continue
        m = re_cmd_ends.search(line)
        if m:
            logging.debug("line %s:%d %s matches as endif" % (filename, i+1, line.strip()))
            required_defs.pop()
            new_lines.append(line)
            continue
        if len(required_defs) == 0:
            # Outside any #ifdef region: keep the line untouched.
            new_lines.append(line)
        elif len(required_defs) == 1 and required_defs[0] == conf:
            # Region for the active configuration: make the code live.
            new_line = remove_comment(file_type, line)
            new_lines.append(new_line)
        else:
            # Region for another configuration (or nested): comment it out.
            new_line = make_comment(file_type, line)
            new_lines.append(new_line)
    assert len(new_lines) == len(lines)
    return new_lines
def find_files(dirname, name):
    """Recursively collect paths under `dirname` whose basename matches
    the glob pattern `name`."""
    found = []
    for root, _dirs, files in os.walk(dirname):
        found.extend(os.path.join(root, f) for f in fnmatch.filter(files, name))
    return found
def fix_package_dirs(conf):
    """Ensure the package directories for `conf` exist, renaming another
    configuration's directory into place when needed.

    Raises when a target path exists but is not a directory, when no
    candidate source directory is found, or when more than one is found.
    """
    for i, dirname in enumerate(package_dirs[conf]):
        logging.debug("trying to restore %s" % dirname)
        if os.path.exists(dirname):
            if os.path.isdir(dirname):
                logging.debug("  already exists")
                continue
            else:
                logging.error("  %s already exists, but is not dir" % dirname)
                continue
        # find other name
        # Search the same tuple index of every OTHER configuration for
        # the directory currently on disk.
        found_dirname = None
        for other_conf, other_dirnames in package_dirs.items():
            other_dirname = other_dirnames[i]
            if other_conf == conf: continue # skip this conf when looking for other conf
            if os.path.isdir(other_dirname):
                if found_dirname is None:
                    found_dirname = other_dirname
                else:
                    # source dir already found :/
                    raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
        if found_dirname is None:
            raise Exception("didn't find %s" % dirname)
        # now rename found_dirname to dirname
        os.rename(found_dirname, dirname)
        logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
    """Apply #ifdef comment toggling for `conf` to each file, rewriting
    only the files whose contents actually change."""
    for filename in filenames:
        original = open(filename).readlines()
        updated = handle_comments(conf, file_type, original, filename)
        if updated == original:
            continue
        logging.debug("file %s comments changed" % filename)
        out = open(filename, 'w')
        out.write(''.join(updated))
        out.close()
        del out
def replace_in_files(conf, filenames):
    """Rewrite every other configuration's package strings into `conf`'s
    strings, writing a file back only when something changed."""
    source_confs = [other for other in file_replaces.keys() if other != conf]
    for filename in filenames:
        old_lines = open(filename).readlines()
        new_lines = []
        for line in old_lines:
            updated = line
            # Each replacement index pairs the target string for `conf`
            # with the corresponding string of every other configuration.
            for idx, target_string in enumerate(file_replaces[conf]):
                for source_conf in source_confs:
                    updated = updated.replace(file_replaces[source_conf][idx], target_string)
            new_lines.append(updated)
        if new_lines != old_lines:
            logging.debug("file %s changed, writing..." % filename)
            f = open(filename, 'w')
            f.write(''.join(new_lines))
            f.close()
            del f
        else:
            logging.debug("file %s didn't change, no need to rewrite" % filename)
def fix_java_files(conf):
    """Switch all Java sources under src/ to configuration `conf`."""
    java_files = find_files('src', name='*.java')
    replace_in_files(conf, java_files)
    handle_comments_in_files(conf, 'java', java_files)
def fix_xml_files(conf):
    """Switch all XML files in the project tree to configuration `conf`."""
    xml_files = find_files('.', name='*.xml')
    replace_in_files(conf, xml_files)
    handle_comments_in_files(conf, 'xml', xml_files)
def fix_html_files(conf):
    """Switch all HTML files under res/ to configuration `conf`."""
    html_files = find_files('res', name='*.html')
    replace_in_files(conf, html_files)
    handle_comments_in_files(conf, 'html', html_files)
def fix_c_files(conf):
    """Switch native sources and headers under jni/pdfview2 to `conf`."""
    # Same treatment for implementation files and headers, in that order.
    for pattern in ('*.c', '*.h'):
        native_files = find_files('jni/pdfview2', name=pattern)
        replace_in_files(conf, native_files)
        handle_comments_in_files(conf, 'c', native_files)
def fix_resources(conf):
    # Placeholder: resource files currently need no per-configuration fixes.
    pass
def main():
    """Entry point: parse --configuration and rewrite the tree for it."""
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    parser = argparse.ArgumentParser(description='Switch project configurations')
    parser.add_argument('--configuration', dest='configuration', default='lite')
    args = parser.parse_args()
    # Refuse to run outside the project root.
    if not os.path.exists('AndroidManifest.xml'):
        raise Exception('android manifest not found, please run this script from main project directory')
    conf = args.configuration
    if conf not in configurations:
        raise Exception("invalid configuration: %s" % conf)
    # Package dirs are renamed first, then file contents are rewritten.
    for step in (fix_package_dirs, fix_java_files, fix_xml_files,
                 fix_html_files, fix_c_files, fix_resources):
        step(conf)

if __name__ == '__main__':
    main()
| Python |
'''
@author: Deli, BYK
@contact: gulen.ilker@hotmail.com, madbyk@gmail.com
@summary:
Provides solving and post-processing functions.
Solving function uses SciPy's sparse solving function from sparse linear algebra module(sparse.spsolve).
Post process shapes the coordinates and results into a proper form to be plotted,
refines the plotting range and approximates mid values using node neighbors
and then produces a smooth contour plot.
@version: 1.6
'''
def solve_system(K, F):
    """Solve the sparse linear system K * x = F and return x.

    Uses SciPy's direct sparse solver; for non-linear problems (such as
    N-S) this call can be swapped for qmr or another iterative solver.
    """
    from scipy.sparse.linalg import spsolve
    print("Solving system...")
    return spsolve(K, F)
def save_solution(file_name, solution):
    """Write the solution vector to `file_name` as JSON: {"T": [...]}.

    `solution` must support .tolist() (e.g. a NumPy array).  The file is
    opened with a context manager so the handle is closed even if
    serialization fails (the original leaked it on error).
    """
    from json import dump
    print(" * Writing output file...")
    with open(file_name, "w") as output_file:
        dump({"T": solution.tolist()}, output_file)
def plot_solution(problem_data, solution):
    """Draw a smooth filled contour plot of the nodal solution values.

    Interpolates the nodal values onto a 200x200 grid and shows the plot
    interactively.  NOTE(review): matplotlib.mlab.griddata was removed in
    modern matplotlib releases -- this presumably targets an old
    matplotlib; confirm before upgrading the dependency.
    """
    from numpy import linspace
    from matplotlib.pylab import contour, colorbar, contourf, xlabel, ylabel, title, show
    from matplotlib.mlab import griddata
    print(" * Preparing for plotting...")
    NN = problem_data["NN"]
    #Extract node coordinates separately for plotting
    x, y = [0] * NN, [0] * NN
    for i, node in enumerate(problem_data["nodes"]):
        x[i] = node[0]
        y[i] = node[1]
    #refine the contour plot mesh for a "smoother" image, generate a 200*200 grid
    xi = linspace(min(x), max(x), 200)
    yi = linspace(min(y), max(y), 200)
    #approximate the mid values from neighbors
    zi = griddata(x, y, solution, xi, yi)
    print(" * Plotting...")
    #plot the contour lines with black
    contour(xi, yi, zi, 15, linewidths = 0.5, colors = 'k')
    #plot the filled contour plot
    plot = contourf(xi, yi, zi, 15, antialiased = True)
    colorbar(plot, format = "%.3f").set_label("T")
    xlabel('X')
    ylabel('Y')
    title("Contour plot of T values for {0}".format(problem_data["title"]))
    show()
def post_process(problem_data, solution, arguments):
    """Run the post-solve steps on `solution` (JSON dump and contour
    plot), honoring the dontsave/dontplot switches on `arguments`."""
    print("Post processing...")
    save_wanted = not arguments.dontsave
    plot_wanted = not arguments.dontplot
    if save_wanted:
        save_solution(problem_data["output"], solution)
    if plot_wanted:
        plot_solution(problem_data, solution)
| Python |
'''
@author: Deli, BYK
@contact: gulen.ilker@hotmail.com, madbyk@gmail.com
@summary:
Provides the routines to read and process problem data to be used in the calculation of the global system.
Also includes Gauss Quadrature and shape function definitions. Calling get_problem_data() from an external
module is enough to get the problem data easily.
@version: 1.2
'''
from math import sqrt
from inpread import read_input_data
from json import load as json_load
import os
GQ = {
    #----------------------------------------------------
    # Definition of Gauss Quadrature Points
    # Format for use: GQ[eType][NGP]["coord" or "weight"]
    # Quad weights sum to 4 (area of [-1,1]^2);
    # triangle weights sum to 1/2 (area of the reference triangle).
    #----------------------------------------------------
    "quad":
    {
        1:  # single midpoint rule
        [
            {"coord": (0., 0.), "weight": 4.}
        ],
        4:  # 2x2 tensor-product rule
        [
            {"coord": (-sqrt(1. / 3.), -sqrt(1. / 3.)), "weight": 1.},
            {"coord": (sqrt(1. / 3.), -sqrt(1. / 3.)), "weight": 1.},
            {"coord": (-sqrt(1. / 3.), sqrt(1. / 3.)), "weight": 1.},
            {"coord": (sqrt(1. / 3.), sqrt(1. / 3.)), "weight": 1.}
        ],
        9:  # 3x3 tensor-product rule
        [
            {"coord": (-sqrt(3. / 5.), -sqrt(3. / 5.)), "weight": 25. / 81.},
            {"coord": (0., -sqrt(3. / 5.)), "weight": 40. / 81.},
            {"coord": (sqrt(3. / 5.), -sqrt(3. / 5.)), "weight": 25. / 81.},
            {"coord": (-sqrt(3. / 5.), 0.), "weight": 40. / 81.},
            {"coord": (0., 0.), "weight": 64. / 81.},
            {"coord": (sqrt(3. / 5.), 0.), "weight": 40. / 81.},
            {"coord": (-sqrt(3. / 5.), sqrt(3. / 5.)), "weight": 25. / 81.},
            {"coord": (0., sqrt(3. / 5.)), "weight": 40. / 81.},
            {"coord": (sqrt(3. / 5.), sqrt(3. / 5.)), "weight": 25. / 81.}
        ],
        16:  # 4x4 tensor-product rule; weights are products of the 1D weights
        [
            {"coord": (-0.8611363116, -0.8611363116), "weight": 0.3478548451 * 0.3478548451},
            {"coord": (-0.3399810435, -0.8611363116), "weight": 0.3478548451 * 0.6521451548},
            {"coord": (0.3399810435, -0.8611363116), "weight": 0.3478548451 * 0.6521451548},
            {"coord": (0.8611363116, -0.8611363116), "weight": 0.3478548451 * 0.3478548451},
            {"coord": (-0.8611363116, -0.3399810435), "weight": 0.6521451548 * 0.3478548451},
            {"coord": (-0.3399810435, -0.3399810435), "weight": 0.6521451548 * 0.6521451548},
            {"coord": (0.3399810435, -0.3399810435), "weight": 0.6521451548 * 0.6521451548},
            {"coord": (0.8611363116, -0.3399810435), "weight": 0.6521451548 * 0.3478548451},
            {"coord": (-0.8611363116, 0.3399810435), "weight": 0.6521451548 * 0.3478548451},
            {"coord": (-0.3399810435, 0.3399810435), "weight": 0.6521451548 * 0.6521451548},
            {"coord": (0.3399810435, 0.3399810435), "weight": 0.6521451548 * 0.6521451548},
            {"coord": (0.8611363116, 0.3399810435), "weight": 0.6521451548 * 0.3478548451},
            {"coord": (-0.8611363116, 0.8611363116), "weight": 0.3478548451 * 0.3478548451},
            {"coord": (-0.3399810435, 0.8611363116), "weight": 0.3478548451 * 0.6521451548},
            {"coord": (0.3399810435, 0.8611363116), "weight": 0.3478548451 * 0.6521451548},
            {"coord": (0.8611363116, 0.8611363116), "weight": 0.3478548451 * 0.3478548451}
        ]
    },
    "tri":
    {
        1:  # centroid rule
        [
            {"coord": (1. / 3., 1. / 3.), "weight": 0.5}
        ],
        3:  # edge-midpoint rule
        [
            {"coord": (0.5, 0.), "weight": 1. / 6.},
            {"coord": (0., 0.5), "weight": 1. / 6.},
            {"coord": (0.5, 0.5), "weight": 1. / 6.}
        ],
        4:  # centroid + three interior points (note the negative centroid weight)
        [
            {"coord": (1. / 3., 1. / 3.), "weight":-27. / 96.},
            {"coord": (0.6, 0.2), "weight": 25. / 96.},
            {"coord": (0.2, 0.6), "weight": 25. / 96.},
            {"coord": (0.2, 0.2), "weight": 25. / 96.}
        ],
        7:  # centroid + two symmetric point groups
        [
            {"coord": (1. / 3., 1. / 3.), "weight": 0.225 / 2.},
            {"coord": (0.059715871789770, 0.470142064105115), "weight": 0.132394152788 / 2.},
            {"coord": (0.470142064105115, 0.059715871789770), "weight": 0.132394152788 / 2.},
            {"coord": (0.470142064105115, 0.470142064105115), "weight": 0.132394152788 / 2.},
            {"coord": (0.101286507323456, 0.797426985353087), "weight": 0.125939180544 / 2.},
            {"coord": (0.101286507323456, 0.101286507323456), "weight": 0.125939180544 / 2.},
            {"coord": (0.797426985353087, 0.101286507323456), "weight": 0.125939180544 / 2.}
        ]
    }
}
Shape = {
    #=============================================================================
    # Definition of Shape functions and their derivatives
    # Format for use: Shape[eType]["linear" or "quadratic"][node]["main"/"dKsi"/"dEta"](ksi, eta)
    #
    # BUGFIX (tri/quadratic): the corner-node derivatives were wrong.
    # For N1 = 2*L*(L - 1/2) with L = 1 - ksi - eta, both dKsi and dEta are
    # 4*(ksi + eta) - 3 (was "-3 + 4*eta + 2*ksi" / "-3 + 4*ksi + 2*eta"),
    # and for N3 = 2*eta*(eta - 1/2), dEta is 4*eta - 1 (was "4*ksi - 1").
    # With these fixes the derivatives of each set sum to zero, as required
    # for a partition of unity.
    #=============================================================================
    "tri":
    {
        "linear":
        [
            {"main": lambda ksi, eta: 1 - ksi - eta, "dKsi": lambda ksi, eta:-1, "dEta": lambda ksi, eta:-1},
            {"main": lambda ksi, eta: ksi, "dKsi": lambda ksi, eta: 1, "dEta": lambda ksi, eta: 0},
            {"main": lambda ksi, eta: eta, "dKsi": lambda ksi, eta: 0, "dEta": lambda ksi, eta: 1}
        ],
        "quadratic":
        [
            # Corner nodes (0,0), (1,0), (0,1), then mid-side nodes.
            {"main": lambda ksi, eta: 2 * (1 - ksi - eta) * (.5 - ksi - eta), "dKsi": lambda ksi, eta:-3 + 4 * ksi + 4 * eta, "dEta": lambda ksi, eta:-3 + 4 * ksi + 4 * eta},
            {"main": lambda ksi, eta: 2 * ksi * (ksi - .5), "dKsi": lambda ksi, eta: 4 * ksi - 1, "dEta": lambda ksi, eta:0},
            {"main": lambda ksi, eta: 2 * eta * (eta - .5), "dKsi": lambda ksi, eta:0, "dEta": lambda ksi, eta: 4 * eta - 1},
            {"main": lambda ksi, eta: 4 * (1 - ksi - eta) * ksi, "dKsi": lambda ksi, eta: 4 * (1 - 2 * ksi - eta), "dEta": lambda ksi, eta:-4 * ksi},
            {"main": lambda ksi, eta: 4 * ksi * eta, "dKsi": lambda ksi, eta:4 * eta, "dEta": lambda ksi, eta:4 * ksi},
            {"main": lambda ksi, eta: 4 * (1 - ksi - eta) * eta, "dKsi": lambda ksi, eta:-4 * eta, "dEta": lambda ksi, eta:4 * (1 - 2 * eta - ksi)}
        ]
    },
    "quad":
    {
        "linear":
        [
            {"main": lambda ksi, eta: .25 * (1 - ksi) * (1 - eta), "dKsi": lambda ksi, eta:-.25 * (1 - eta), "dEta": lambda ksi, eta:-.25 * (1 - ksi)},
            {"main": lambda ksi, eta: .25 * (1 + ksi) * (1 - eta), "dKsi": lambda ksi, eta: .25 * (1 - eta), "dEta": lambda ksi, eta:-.25 * (1 + ksi)},
            {"main": lambda ksi, eta: .25 * (1 + ksi) * (1 + eta), "dKsi": lambda ksi, eta: .25 * (1 + eta), "dEta": lambda ksi, eta: .25 * (1 + ksi)},
            {"main": lambda ksi, eta: .25 * (1 - ksi) * (1 + eta), "dKsi": lambda ksi, eta:-.25 * (1 + eta), "dEta": lambda ksi, eta: .25 * (1 - ksi)}
        ],
        "quadratic":
        [
            {"main": lambda ksi, eta: .25 * (ksi ** 2 - ksi) * (eta ** 2 - eta), "dKsi": lambda ksi, eta: .25 * (2 * ksi - 1) * (eta ** 2 - eta), "dEta": lambda ksi, eta: .25 * (ksi ** 2 - ksi) * (2 * eta - 1)},
            {"main": lambda ksi, eta: .25 * (ksi ** 2 + ksi) * (eta ** 2 - eta), "dKsi": lambda ksi, eta: .25 * (2 * ksi + 1) * (eta ** 2 - eta), "dEta": lambda ksi, eta: .25 * (ksi ** 2 + ksi) * (2 * eta - 1)},
            {"main": lambda ksi, eta: .25 * (ksi ** 2 + ksi) * (eta ** 2 + eta), "dKsi": lambda ksi, eta: .25 * (2 * ksi + 1) * (eta ** 2 + eta), "dEta": lambda ksi, eta: .25 * (ksi ** 2 + ksi) * (2 * eta + 1)},
            {"main": lambda ksi, eta: .25 * (ksi ** 2 - ksi) * (eta ** 2 + eta), "dKsi": lambda ksi, eta: .25 * (2 * ksi - 1) * (eta ** 2 + eta), "dEta": lambda ksi, eta: .25 * (ksi ** 2 - ksi) * (2 * eta + 1)},
            {"main": lambda ksi, eta: .5 * (1 - ksi ** 2) * (eta ** 2 - eta), "dKsi": lambda ksi, eta: .5 * -2 * ksi * (eta ** 2 - eta), "dEta": lambda ksi, eta: .5 * (1 - ksi ** 2) * (2 * eta - 1)},
            {"main": lambda ksi, eta: .5 * (ksi ** 2 + ksi) * (1 - eta ** 2), "dKsi": lambda ksi, eta: .5 * (2 * ksi + 1) * (1 - eta ** 2), "dEta": lambda ksi, eta: .5 * (ksi ** 2 + ksi) * -2 * eta},
            {"main": lambda ksi, eta: .5 * (1 - ksi ** 2) * (eta ** 2 + eta), "dKsi": lambda ksi, eta: .5 * -2 * ksi * (eta ** 2 + eta), "dEta": lambda ksi, eta: .5 * (1 - ksi ** 2) * (2 * eta + 1)},
            {"main": lambda ksi, eta: .5 * (ksi ** 2 - ksi) * (1 - eta ** 2), "dKsi": lambda ksi, eta: .5 * (2 * ksi - 1) * (1 - eta ** 2), "dEta": lambda ksi, eta: .5 * (ksi ** 2 - ksi) * -2 * eta},
            {"main": lambda ksi, eta: (1 - ksi ** 2) * (1 - eta ** 2), "dKsi": lambda ksi, eta:-2 * ksi * (1 - eta ** 2), "dEta": lambda ksi, eta: (1 - ksi ** 2) * -2 * eta}
        ]
    }
}
Order = {
    # Maps element type and number of element nodes (NEN) to interpolation order.
    "tri": {3: "linear", 6: "quadratic"},
    "quad": {4: "linear", 9: "quadratic"}
}
def process_functions(functions, UV_data, nodes):
    """
    Processes coefficient functions of the DE/problem to create directly
    callable functions from Python.

    functions -- dict mapping coefficient names to expression strings in x, y.
                 '?' means "not provided" and becomes the constant 0, while
                 "x" or "y" selects the interpolated U or V field from UV_data.
    UV_data   -- [U, V] nodal value lists, only read when an entry is "x"/"y".
    nodes     -- list of (x, y) node coordinates used as spline support points.
    Returns the same dict with every value replaced by a callable f(x, y).
    """
    default_lambda = "lambda x,y:"
    #Lazily built eval() environment holding the fitted B-spline data;
    #only created when at least one function refers to the U/V fields.
    spline_env = None
    for name in functions:
        if functions[name] == '?':
            functions[name] = '0'
        elif functions[name] == "x" or functions[name] == "y":
            #It is indicated that the provided U & V values are to be used
            if not spline_env:
                x = [node[0] for node in nodes]
                y = [node[1] for node in nodes]
                from scipy.interpolate import bisplrep, bisplev
                #Fit a bivariate B-spline to the U and V values to calculate
                #values that are not on the nodes. This dictionary is passed
                #to eval() so the generated lambdas can resolve the spline
                #representations and bisplev.
                spline_env = {
                    "x_tck": bisplrep(x, y, UV_data[0]),
                    "y_tck": bisplrep(x, y, UV_data[1]),
                    "bisplev": bisplev
                }
            functions[name] = eval("lambda x,y: bisplev(x, y, {0}_tck)".format(functions[name]), spline_env)
            continue
        #NOTE: eval() on file-provided expression strings — the input file is
        #trusted here; do not feed this module untrusted input.
        functions[name] = default_lambda + functions[name]
        functions[name] = eval(functions[name])
    return functions
def process_problem_data(problem_data):
    """
    Takes the raw problem data then converts the string functions into usable
    functions with process_functions, determines the necessary shape functions
    and embeds them into problem_data together with the necessary GQ info.

    Returns the same problem_data dict, augmented in place.
    """
    eType = problem_data["eType"]
    #NEN (nodes per element) determines the interpolation order for this eType
    eOrder = Order[eType][problem_data["NEN"]]
    problem_data["GQ"] = GQ[eType][problem_data["NGP"]]
    problem_data["shapefunc"] = Shape[eType][eOrder]
    #Fill in optional fields with defaults when the input did not provide them
    problem_data.setdefault("UV", None)
    problem_data.setdefault("title", "Untitled Problem")
    process_functions(problem_data["functions"], problem_data["UV"], problem_data["nodes"])
    return problem_data
def read_problem_data(input_name = '', output_name = ''):
    """
    Reads the problem data from the user provided file name.
    The file can either be an .inp file or a .json file.

    input_name  -- path to the input file; prompted for when empty.
    output_name -- overrides the "output" entry of the problem data when given;
                   otherwise defaults to "<input>_output.json" if unset.
    Returns the problem data dictionary.
    """
    if not input_name:
        input_name = raw_input('Enter input file name: ')
    file_name_parts = os.path.splitext(input_name)
    file_ext = file_name_parts[1]
    if file_ext == "":
        #No extension given: probe for a .json file first, then an .inp file
        if os.path.exists(file_name_parts[0] + ".json"):
            file_ext = ".json"
        elif os.path.exists(file_name_parts[0] + ".inp"):
            file_ext = ".inp"
        else:
            print("Cannot find valid input file. Expecting an .inp or .json file.")
            exit()
    #"with" guarantees the handle is closed even if parsing raises
    with open(file_name_parts[0] + file_ext, "r") as input_file:
        if file_ext == ".json":
            problem_data = json_load(input_file)
        else:
            problem_data = read_input_data(input_file)
    if output_name:
        problem_data["output"] = output_name
    if ("output" not in problem_data) or (not problem_data["output"]):
        problem_data["output"] = file_name_parts[0] + "_output.json"
    return problem_data
def get_problem_data(input_name = '', output_name = ''):
    """
    Module function to be called in main module to get the prepared problem data.
    """
    raw_problem_data = read_problem_data(input_name, output_name)
    return process_problem_data(raw_problem_data)
| Python |
'''
@author: BYK
@contact: madbyk@gmail.com
@summary:
Provides the routines to read an ".inp" file whose format is defined by Dr. Sert into
an easy to use data structure in the program. When run on its own, it converts ".inp"
files into ".json" files.
@version: 1.2
'''
import re
from json import dump as json_dump
from os.path import splitext as os_path_splitext
def imap_dict(func, dict_obj):
    """
    Applies "func" to every value of dict_obj in place, replacing each original
    value with func's result, and returns the (mutated) dict for chaining.
    """
    for key in list(dict_obj):
        original = dict_obj[key]
        dict_obj[key] = func(original)
    return dict_obj
def locate_value_line(file_object, info_pattern):
    """
    Advances file_object line by line until a line matches the RegEx pattern
    given in "info_pattern" (that line is consumed) or EOF is reached.
    Used to position the file cursor so sections can be read in arbitrary order.
    """
    matcher = re.compile(info_pattern)
    current = " "
    while current != "" and matcher.search(current) is None:
        current = file_object.readline()
def read_fundamental_variables(file_object):
    """
    Locates the "eType NE NN NEN NGP" header line, then parses the following
    line into a dictionary of integers under those same names. The numeric
    eType code is translated to its string name ("quad" or "tri").
    """
    locate_value_line(file_object, r"^eType\s+NE\s+NN\s+NEN\s+NGP")
    value_pattern = r"(?P<eType>[12])\s+(?P<NE>\d+)\s+(?P<NN>\d+)\s+(?P<NEN>\d+)\s+(?P<NGP>\d+)"
    raw_values = re.search(value_pattern, file_object.readline()).groupdict()
    variables = imap_dict(int, raw_values)
    element_type_names = {1: "quad", 2: "tri"}
    variables['eType'] = element_type_names[variables['eType']]
    return variables
def read_problem_functions(file_object):
    """
    Locates the "a V1 V2 c f exactSoln" header line, then reads the next six
    lines — one expression per line, in that order — and returns them as a
    dictionary of stripped strings under the same names.
    """
    locate_value_line(file_object, r"^a\s+V1\s+V2\s+c\s+f\s+exactSoln")
    function_names = ("a", "V1", "V2", "c", "f", "exactSoln")
    #dict() consumes the generator in tuple order, so lines are read in order
    return dict((name, file_object.readline().strip()) for name in function_names)
def read_mesh(file_object, node_count):
    """
    Locates the "Node# x y" (or "Node No x y") header line, then reads
    node_count lines and builds the nodes list of [x, y] coordinate pairs.
    Nodes may appear in any order in the file since each line carries its own
    1-based node number in the first column.
    """
    line_pattern = re.compile(r"(?P<node>\d+)\s+(?P<x>\S+)\s+(?P<y>\S+)")
    locate_value_line(file_object, r"^(Node#|Node No)\s+x\s+y")
    nodes = [[] for _ in range(node_count)]
    for _ in range(node_count):
        parsed = line_pattern.search(file_object.readline()).groupdict()
        slot = int(parsed['node']) - 1
        nodes[slot] = [float(parsed['x']), float(parsed['y'])]
    return nodes
def one_less_int_(val):
    """Converts val to int and shifts it from 1-based to 0-based indexing."""
    number = int(val)
    return number - 1
def read_LtoG(file_object, element_count):
    """
    Locates the "Elem# node1 node2 node3" (or "Elem No ...") header line, then
    reads element_count lines and builds the local-to-global (LtoG) node table.
    Elements may appear in any order in the file since each line carries its
    own 1-based element number in the first column; all numbers are converted
    to 0-based indices.
    """
    number_pattern = re.compile(r"\d+")
    locate_value_line(file_object, r"^(Elem#|Elem No)\s+node1\s+node2\s+node3")
    elements = [[] for _ in range(element_count)]
    for _ in range(element_count):
        numbers = [one_less_int_(tok) for tok in number_pattern.findall(file_object.readline())]
        elements[numbers[0]] = numbers[1:]
    return elements
def read_boundary_conditions(file_object):
    """
    Reads all boundary condition information into a dictionary.
    First the "nBCdata" section supplies the numbered BC value sets, then the
    "nEBCnodes nNBCfaces nMBCfaces" line supplies the counts, and finally the
    EBC, NBC and MBC applicant sections (in that file order) attach each node
    or element face to its BC data, producing an easy-to-use structure.
    """
    token_matcher = re.compile(r"\S+")
    locate_value_line(file_object, r"^nBCdata")
    data_set_count = int(file_object.readline())
    data_sets = []
    for _ in range(data_set_count):
        tokens = token_matcher.findall(file_object.readline())
        #First token is the BC number; the rest are the BC values
        data_sets.append([float(tok) for tok in tokens[1:]])
    locate_value_line(file_object, r"^nEBCnodes\s+nNBCfaces\s+nMBCfaces")
    counts = imap_dict(int, re.search(r"(?P<EBC>\d+)\s+(?P<NBC>\d+)\s+(?P<MBC>\d+)", file_object.readline()).groupdict())
    BCs = {"EBC": [], "NBC": [], "MBC": []}
    locate_value_line(file_object, r"^EBC Data\s+\(Node\s+BCno\)")
    for _ in range(counts['EBC']):
        tokens = [one_less_int_(tok) for tok in token_matcher.findall(file_object.readline())]
        BCs["EBC"].append({"node": tokens[0], "data": data_sets[tokens[-1]]})
    locate_value_line(file_object, r"^NBC Data\s+\(Elem\s+Face\s+BCno\)")
    for _ in range(counts['NBC']):
        tokens = [one_less_int_(tok) for tok in token_matcher.findall(file_object.readline())]
        BCs["NBC"].append({"element": tokens[0], "face": tokens[1], "data": data_sets[tokens[-1]]})
    locate_value_line(file_object, r"^MBC Data\s+\(Elem\s+Face\s+BCno\)")
    for _ in range(counts['MBC']):
        tokens = [one_less_int_(tok) for tok in token_matcher.findall(file_object.readline())]
        BCs["MBC"].append({"element": tokens[0], "face": tokens[1], "data": data_sets[tokens[-1]]})
    return BCs
def read_UV_values(file_object, node_count):
    """
    Locates the "Node No U V" header line, then reads node_count lines and
    parses the U and V values into two parallel lists indexed by 0-based node
    number. Returns [U, V], or None if a line yields no tokens (the section is
    empty, truncated or missing).
    """
    value_matcher = re.compile(r"\S+")
    locate_value_line(file_object, r"^Node No\s+U\s+V")
    UV = [[0] * node_count, [0] * node_count]
    for _ in range(node_count):
        values = value_matcher.findall(file_object.readline())
        if not values:
            #No (or incomplete) UV data in this input file
            return None
        node = one_less_int_(values[0])
        UV[0][node] = float(values[1])
        UV[1][node] = float(values[2])
    return UV
def read_input_data(file_object):
    """
    Reads a whole ".inp" file from the already-opened file_object using the
    section readers above and collects everything into one "data" dictionary
    describing the problem.
    """
    print("Parsing input data...")
    data = read_fundamental_variables(file_object)
    #The sections below are read in the same order as they appear in the file
    data["functions"] = read_problem_functions(file_object)
    data["nodes"] = read_mesh(file_object, data['NN'])
    data["LtoG"] = read_LtoG(file_object, data['NE'])
    data["BCs"] = read_boundary_conditions(file_object)
    data["UV"] = read_UV_values(file_object, data['NN'])
    return data
if __name__ == "__main__":
    #Standalone mode: convert an .inp file into the equivalent .json file.
    file_name = raw_input('Enter input file name: ')
    file_name_parts = os_path_splitext(file_name)
    if file_name_parts[1] != '.inp' and file_name_parts[1] != '':
        print("There is nothing I can do with this file, sorry.")
        exit()
    elif file_name_parts[1] == '':
        file_name += '.inp'
    #Parse first, and only create the .json file once parsing succeeded;
    #"with" guarantees both handles are closed even if an error occurs.
    with open(file_name, 'r') as input_file:
        data = read_input_data(input_file)
    print("Writing JSON file...")
    with open(file_name_parts[0] + '.json', 'w') as json_file:
        json_dump(data, json_file)
    print("JSON file created successfully.")
| Python |
'''
@author: Deli, BYK
@contact: gulen.ilker@hotmail.com, madbyk@gmail.com
@summary:
Provides the routines to calculate elemental systems, assembling them and applying boundary conditions.
@version: 1.1
'''
from numpy import array, zeros, linalg, matrix
from scipy import sparse
from math import sqrt
#NOTE(review): a "global" statement at module level is a no-op in Python; the
#names below are actually made global by the declaration inside calc_global,
#which populates them before calc_elem reads them.
global NEN, NEN_range, functions, a, V1, V2, c, f, shape_funcs
def get_element_coords(problem_data, e_nodes):
    """
    Builds the coordinate array for the element's nodes; needed in both
    elemental and global system calculations.
    """
    all_nodes = problem_data["nodes"]
    return array([array(all_nodes[node_no]) for node_no in e_nodes])
def calc_elem(problem_data, e_nodes):
    """
    Calculates the elemental stiffness matrix Ke and force vector Fe for the
    element given by the node list e_nodes, integrating with Gauss Quadrature.

    Relies on the module level names populated by calc_global before the
    element loop: NEN, NEN_range, shape_funcs and the coefficient functions
    a, V1, V2, c, f. Returns the (Ke, Fe) pair.
    """
    #Initializing elemental values
    Fe = zeros((NEN, 1))
    Ke = zeros((NEN, NEN))
    Se = [0] * NEN
    DKe = [0] * NEN
    DEe = [0] * NEN
    e_coord = get_element_coords(problem_data, e_nodes)
    for GQ_info in problem_data["GQ"]:
        ksi = GQ_info["coord"][0]
        eta = GQ_info["coord"][1]
        weight = GQ_info["weight"]
        #Evaluate shape functions and their ksi/eta derivatives at this GQ point
        for k, shape_func in enumerate(shape_funcs):
            Se[k] = shape_func["main"](ksi, eta)
            DKe[k] = shape_func["dKsi"](ksi, eta)
            DEe[k] = shape_func["dEta"](ksi, eta)
        DS = matrix((DKe, DEe))
        #Jacobian of the master -> global element mapping
        Jacob = DS * matrix(e_coord)
        invJacob = linalg.inv(Jacob)
        detJ = linalg.det(Jacob)
        #Shape function derivatives with respect to the global x and y
        gDS = invJacob * DS
        #Global coordinate calculation of the GQ point
        x, y = 0, 0
        for i, coord in enumerate(e_coord):
            x += Se[i] * coord[0]
            y += Se[i] * coord[1]
        #The coefficient functions depend only on the GQ point, so evaluate
        #them once here instead of inside the i/j loops
        a_xy = a(x, y)
        V1_xy = V1(x, y)
        V2_xy = V2(x, y)
        c_xy = c(x, y)
        f_xy = f(x, y)
        #Main loop for elemental K and F calculation
        for i in NEN_range:
            Fe[i] += Se[i] * f_xy * detJ * weight
            for j in NEN_range:
                Ke[i][j] += (a_xy * (gDS[0, i] * gDS[0, j] + gDS[1, i] * gDS[1, j]) + Se[i] * (V1_xy * gDS[0, j] + V2_xy * gDS[1, j]) + c_xy * Se[i] * Se[j]) * detJ * weight
    return Ke, Fe
def calc_global(problem_data):
    """
    Calculates global stiffness matrix, assembly of elemental systems are included here
    instead of defining an extra function for assembly.

    Returns (K, F): K is the assembled global matrix converted to CSR sparse
    format and F is the NN x 1 global right-hand-side vector. Consumed entries
    of problem_data (GQ, UV, functions, LtoG, BCs) are deleted along the way
    to free memory.
    """
    print("Calculating global system...")
    #calc_elem reads these module-level names, so they are declared global
    #and populated here before the element loop
    global NEN, NEN_range, functions, a, V1, V2, c, f, shape_funcs
    #Defining global variables
    NEN = problem_data["NEN"]
    NEN_range = range(NEN)
    #Taking coefficient functions of DE out of problem data
    functions = problem_data["functions"]
    a = functions["a"]
    V1 = functions["V1"]
    V2 = functions["V2"]
    c = functions["c"]
    f = functions["f"]
    #Defining shape functions
    shape_funcs = problem_data["shapefunc"]
    print(" * Creating matrixes...")
    NN = problem_data["NN"]
    #LIL format is efficient for incremental element-by-element assembly
    K = sparse.lil_matrix((NN, NN))
    F = zeros((NN, 1))
    print(" * Calculating K and F matrixes...")
    #Assemble: scatter each elemental (Ke, Fe) into the global system via LtoG
    for e_nodes in problem_data["LtoG"]:
        Ke, Fe = calc_elem(problem_data, e_nodes)
        for i, node_i in enumerate(e_nodes):
            F[node_i] += Fe[i]
            for j, node_j in enumerate(e_nodes):
                K[node_i, node_j] += Ke[i][j]
    print(" * Freeing up memory (1/2)...")
    del problem_data["GQ"]
    del problem_data["UV"]
    del problem_data["functions"]
    if problem_data["BCs"]:
        K, F = apply_bc(problem_data, K, F)
    print (" * Freeing up memory (2/2)...")
    del problem_data["LtoG"]
    del problem_data["BCs"]
    print(" * Converting LIL to CSR format...")
    #CSR is the efficient format for the solve step that follows
    K = K.tocsr()
    return K, F
def apply_bc(problem_data, K, F):
    """
    Applies all boundary conditions, according to input.

    EBC entries overwrite the node's equation outright; NBC entries add a
    face-length-scaled value to F at the two face nodes; MBC entries modify
    both F and the diagonal of K for the two face nodes.
    Returns the modified (K, F) pair.
    """
    print(" * Applying boundary conditions...")
    print(" * Applying EBCs...")
    for BC in problem_data["BCs"]["EBC"]:
        node = BC["node"]
        data = BC["data"][0]
        #Replace the node's whole equation with "1 * u[node] = data"
        F[node] = data
        K[node, :] = 0.0
        K[node, node] = 1.0
    print(" * Applying NBCs...")
    NEN = problem_data["NEN"]
    for BC in problem_data["BCs"]["NBC"]:
        #A face is identified by its first local node; the second is the next
        #local node (wrapping around the element)
        node1 = BC["face"]
        node2 = (node1 + 1) % NEN
        SV = BC["data"][0]
        e_nodes = problem_data["LtoG"][BC["element"]]
        e_coord = get_element_coords(problem_data, e_nodes)
        length = sqrt(((e_coord[node1] - e_coord[node2]) ** 2).sum())
        #Half of the total face contribution goes to each of the two face nodes
        SV *= .5 * length
        F[e_nodes[node1]] += SV
        F[e_nodes[node2]] += SV
    print(" * Applying MBCs...")
    for BC in problem_data["BCs"]["MBC"]:
        element = BC["element"]
        node1 = BC["face"]
        node2 = (node1 + 1) % NEN
        #data[0] is alpha (coefficient on the unknown), data[1] is beta (RHS)
        alpha = BC["data"][0]
        beta = BC["data"][1]
        e_nodes = problem_data["LtoG"][element]
        e_coord = get_element_coords(problem_data, e_nodes)
        length = sqrt(((e_coord[node1] - e_coord[node2]) ** 2).sum())
        node1 = e_nodes[node1]
        node2 = e_nodes[node2]
        F[node1] += 0.5 * beta * length
        F[node2] += 0.5 * beta * length
        #NOTE(review): only the diagonal entries are touched here, and with
        #different factors (1/3 vs 1/6) for the two face nodes — confirm this
        #matches the intended boundary integral for the mixed/Robin condition.
        K[node1, node1] -= (alpha * length) / 3.
        K[node2, node2] -= (alpha * length) / 6.
    return K, F
| Python |
'''
@author: BYK, Deli
@contact: gulen.ilker@hotmail.com, madbyk@gmail.com
@summary:
A steady 2D advection-diffusion FEM solver in Python 2.7
using NumPy, SciPy and MatPlotLib
@version: 1.6
'''
if __name__ == "__main__":
    import argparse
    from psetup import get_problem_data
    from gsystem import calc_global
    from solveproc import post_process, solve_system
    from time import time
    #Command line interface definition
    cli_parser = argparse.ArgumentParser(description='Solves steady and 2D advection/diffusion problems using finite elements method.')
    cli_parser.add_argument('-i', '--input', default='', help='Input file path.')
    cli_parser.add_argument('-o', '--output', default='', help='Output file path.')
    cli_parser.add_argument('-P', '--dontplot', default=False, action='store_true', help='Do not create a contour plot of the solution.')
    cli_parser.add_argument('-S', '--dontsave', default=False, action='store_true', help='Do not save the solution to a file.')
    cli_args = cli_parser.parse_args()
    problem_data = get_problem_data(cli_args.input, cli_args.output)
    #Start timing after input reading so it is excluded from the total
    start = time()
    #Calculate, then solve the global system
    K, F = calc_global(problem_data)
    solution = solve_system(K, F)
    elapsed = time() - start
    print("Total run time: {0} seconds.".format(elapsed))
    #Post processing is excluded from the measured total as well
    post_process(problem_data, solution, cli_args)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.